From 3c8e816b2ff53b8a166d37ab26b9f7b5cd410aae Mon Sep 17 00:00:00 2001
From: Rohith Surya Podugu
Date: Thu, 13 Jun 2024 09:39:10 -0700
Subject: [PATCH] Rename classes, variables and methods according to PEP-8 conventions

---
 examples/work_unit_hello_world.py |  19 +-
 pyhpcc/models.py                  | 582 +++++++++++++++---------------
 pyhpcc/roxie_binder.py            |  10 +-
 pyhpcc/utils.py                   | 101 +++---
 tests/test_hpcc_api.py            | 144 ++++----
 5 files changed, 431 insertions(+), 425 deletions(-)

diff --git a/examples/work_unit_hello_world.py b/examples/work_unit_hello_world.py
index ab66083..31c6743 100644
--- a/examples/work_unit_hello_world.py
+++ b/examples/work_unit_hello_world.py
@@ -1,7 +1,8 @@
 import os
-from pyhpcc.auth import auth
-from pyhpcc.models import hpcc
-from pyhpcc.models import workunit_submit as ws
+
+from pyhpcc.auth import Auth
+from pyhpcc.models import HPCC
+from pyhpcc.models import WorkunitSubmit as ws
 
 # Configurations
 environment = ""  # Eg: myuniversity.hpccsystems.io
@@ -17,7 +18,7 @@
 working_folder = os.getcwd()  # Folder to generate .ecl, .eclxml, .eclxml.xml
 
 try:
-    auth_object = auth(
+    auth_object = Auth(
         environment,
         port,
         user_name,
@@ -25,17 +26,17 @@
         require_auth=required_auth,
         protocol=protocol,
     )
-    hpcc_object = hpcc(auth=auth_object)
+    hpcc_object = HPCC(auth=auth_object)
     work_s = ws(hpcc_object, cluster, cluster)
-    file_name = work_s.create_filename(
-        QueryText=ecl_query, working_folder=working_folder, Jobname=job_name
+    file_name = work_s.create_file_name(
+        query_text=ecl_query, working_folder=working_folder, job_name=job_name
     )
-    output, output_file = work_s.bash_compile(filename=file_name, gitrepository="")
+    output, output_file = work_s.bash_compile(file_name=file_name, git_repository="")
     if str(output).find("error") == -1:
         output, error = work_s.bash_run(output_file, cluster=cluster)
         index = str(output).find("running")
         wuid = str(output)[: index - 1]
-        _ = work_s.WUWaitComplete(wuid)
+        _ = work_s.wu_wait_complete(wuid)
     else:
         print(str(output))
     print(f"{wuid} submitted successfully")
diff --git a/pyhpcc/models.py b/pyhpcc/models.py
index 9d79105..72c5b43 100644
--- a/pyhpcc/models.py
+++ b/pyhpcc/models.py
@@ -1,16 +1,18 @@
-import subprocess
-import os
 import json
 import logging
+import os
+import subprocess
+
 import requests
-from pyhpcc.thor_binder import wrapper as thor_wrapper
-from pyhpcc.roxie_binder import wrapper as roxie_wrapper
-from pyhpcc.errors import HPCCException
+
 import pyhpcc.config as conf
 import pyhpcc.utils as utils
+from pyhpcc.errors import HPCCException
+from pyhpcc.roxie_binder import wrapper as roxie_wrapper
+from pyhpcc.thor_binder import wrapper as thor_wrapper
 
 
-class hpcc(object):
+class HPCC(object):
     """
     Base class for HPCC THOR API.
@@ -25,88 +27,88 @@ class hpcc(object): Methods: ------- - get_wuinfo: + get_wu_info: Get the workunit information - get_wuresult: + get_wu_result: Get the workunit result - getdfuInfo: + get_dfu_info: Get the DFU information - wuCreateAndUpdate: + wu_create_and_update: Create and update the workunit - wuSubmit: + wu_submit: Submit the workunit - wuRun: + wu_run: Run the workunit - get_wuquery: + get_wu_query: Get the workunit query - wuQuery: + wu_query: Query the workunits using filters - fileQuery: + file_query: Query the files using filters - getFileInfo: + get_file_info: Get the file information - WUWaitCompiled: + wu_wait_compiled: Wait for the workunit to be compiled - WUWaitComplete: + wu_wait_complete: Wait for the workunit to be completed - getSubFileInfo: + get_subfile_info: Get the subfile information - checkFileExists: + check_file_exists: Check if the file exists - TpClusterInfo: + tp_cluster_info: Get the cluster information - Activity: + activity: Get the activity information - UploadFile: + upload_file: Upload the file - DropZoneFiles: + drop_zone_files: Get the dropzone files - dfuQuery: + dfu_query: Query the DFU files using filters - getDfuWorkunitInfo: + get_dfu_workunit_info: Get the DFU workunit information - getDfuWorkunits: + get_dfu_workunits: Get the DFU workunits - sprayVariable: + spray_variable: Spray a file of variable length records - sprayFixed: + spray_fixed: Spray a file of fixed format - WUUpdate: + wu_update: Update the workunit - getgraph: + get_graph: Get the graph information - downloadfile: + download_file: Download the file - AddtoSuperfileRequest: + add_to_superfile_request: Add the file to the superfile - fileList: + file_list: Get the file list """ @@ -117,7 +119,7 @@ def __init__(self, auth, timeout=1200, response_type="json"): self.response_type = response_type @property - def get_wuinfo(self): + def get_wu_info(self): """Get information about a workunit""" return thor_wrapper( api=self, @@ -144,7 +146,7 @@ def get_wuinfo(self): ) @property - def get_wuresult(self): + def get_wu_result(self): """Get the results of a workunit""" return thor_wrapper( api=self, @@ -164,7 +166,7 @@ def get_wuresult(self): ) @property - def getdfuInfo(self): + def get_dfu_info(self): """Get information about a file""" return thor_wrapper( api=self, @@ -180,7 +182,7 @@ def getdfuInfo(self): ) @property - def wuCreateAndUpdate(self): + def wu_create_and_update(self): """Create and update a workunit""" return thor_wrapper( api=self, @@ -215,7 +217,7 @@ def wuCreateAndUpdate(self): ) @property - def wuSubmit(self): + def wu_submit(self): """Submit a workunit""" return thor_wrapper( api=self, @@ -234,7 +236,7 @@ def wuSubmit(self): ) @property - def wuRun(self): + def wu_run(self): """Run a workunit""" return thor_wrapper( api=self, @@ -257,7 +259,7 @@ def wuRun(self): ) @property - def get_wuquery(self): + def get_wu_query(self): """Get the ECL query of a workunit""" return thor_wrapper( api=self, @@ -291,7 +293,7 @@ def get_wuquery(self): ) @property - def wuQuery(self): + def wu_query(self): """Query workunits using filters""" return thor_wrapper( api=self, @@ -325,7 +327,7 @@ def wuQuery(self): ) @property - def fileQuery(self): + def file_query(self): """Query files using filters""" return thor_wrapper( api=self, @@ -350,7 +352,7 @@ def fileQuery(self): ) @property - def getFileInfo(self): + def get_file_info(self): """Get information about a file""" return thor_wrapper( api=self, @@ -360,7 +362,7 @@ def getFileInfo(self): ) @property - def WUWaitCompiled(self): + 
def wu_wait_compiled(self): """Wait for a workunit to compile""" return thor_wrapper( api=self, @@ -370,7 +372,7 @@ def WUWaitCompiled(self): ) @property - def WUWaitComplete(self): + def wu_wait_complete(self): """Wait for a workunit to complete""" return thor_wrapper( api=self, @@ -380,14 +382,14 @@ def WUWaitComplete(self): ) @property - def getSubFileInfo(self): + def get_subfile_info(self): """Get information about a subfile""" return thor_wrapper( api=self, path="WsDfu/DFUInfo", payload_list=True, allowed_param=["Name"] ) @property - def checkFileExists(self): + def check_file_exists(self): """Check if a file exists""" return thor_wrapper( api=self, @@ -397,7 +399,7 @@ def checkFileExists(self): ) @property - def TpClusterInfo(self): + def tp_cluster_info(self): """Get information about a cluster""" return thor_wrapper( api=self, @@ -407,7 +409,7 @@ def TpClusterInfo(self): ) @property - def Activity(self): + def activity(self): """Get information about a workunit activity""" return thor_wrapper( api=self, @@ -417,7 +419,7 @@ def Activity(self): ) @property - def UploadFile(self): + def upload_file(self): """Upload a file to the HPCC""" return thor_wrapper( api=self, @@ -427,7 +429,7 @@ def UploadFile(self): ) @property - def DropZoneFiles(self): + def drop_zone_files(self): """Get information about files in a dropzone""" return thor_wrapper( api=self, @@ -437,7 +439,7 @@ def DropZoneFiles(self): ) @property - def dfuQuery(self): + def dfu_query(self): """Query files using filters""" return thor_wrapper( api=self, @@ -468,7 +470,7 @@ def dfuQuery(self): ) @property - def getDfuWorkunitInfo(self): + def get_dfu_workunit_info(self): """Get information about a DFU workunit""" return thor_wrapper( api=self, @@ -478,7 +480,7 @@ def getDfuWorkunitInfo(self): ) @property - def getDfuWorkunits(self): + def get_dfu_workunits(self): """Get information about DFU workunits""" return thor_wrapper( api=self, @@ -501,7 +503,7 @@ def getDfuWorkunits(self): ) @property - def sprayVariable(self): + def spray_variable(self): """Spray a file to HPCC""" return thor_wrapper( api=self, @@ -544,7 +546,7 @@ def sprayVariable(self): ) @property - def sprayFixed(self): + def spray_fixed(self): """Spray a fixed file to HPCC""" return thor_wrapper( api=self, @@ -579,7 +581,7 @@ def sprayFixed(self): ) @property - def WUUpdate(self): + def wu_update(self): """Update a workunit""" return thor_wrapper( api=self, @@ -614,7 +616,7 @@ def WUUpdate(self): ) @property - def getgraph(self): + def get_graph(self): """Get a graph from a workunit""" return thor_wrapper( api=self, @@ -624,7 +626,7 @@ def getgraph(self): ) @property - def downloadfile(self): + def download_file(self): """Download a file from the HPCC""" return thor_wrapper( api=self, @@ -634,7 +636,7 @@ def downloadfile(self): ) @property - def AddtoSuperfileRequest(self): + def add_to_superfile_request(self): """Add a file to a superfile""" return thor_wrapper( api=self, @@ -644,7 +646,7 @@ def AddtoSuperfileRequest(self): ) @property - def fileList(self): + def file_list(self): """List files in a directory""" return thor_wrapper( api=self, @@ -655,7 +657,7 @@ def fileList(self): ) -class roxie(object): +class Roxie(object): """ Base class for HPCC Roxie API @@ -669,14 +671,14 @@ class roxie(object): Type of response to return definition: Definition of the API - searchservice: + search_service: Search service object - roxieport: + roxie_port: Roxie port Methods ------- - __init__(auth, timeout, response_type, definition, searchservice, roxieport) + 
__init__(auth, timeout, response_type, definition, search_service, roxie_port) Initialize the class roxie_call(self) @@ -686,7 +688,7 @@ class roxie(object): def __init__( self, auth, - searchservice, + search_service, roxie_port, timeout=1200, response_type="json", @@ -696,7 +698,7 @@ def __init__( self.timeout = timeout self.response_type = response_type self.definition = "WsEcl/" + definition + "/query" - self.searchservice = searchservice + self.search_service = search_service self.roxie_port = roxie_port @property @@ -717,7 +719,7 @@ def roxie_call(self): return roxie_wrapper(api=self) -class workunit_submit(object): +class WorkunitSubmit(object): """ Base class for HPCC workunit submit @@ -738,13 +740,13 @@ class workunit_submit(object): write_file: Write a file to HPCC - get_bashcommand: + get_bash_command: Get the bash command to submit a workunit - get_workload: + get_work_load: Get the workload on the clusters - create_filename: + create_file_name: Create a filename for the workunit bash_compile: @@ -753,19 +755,19 @@ class workunit_submit(object): bash_run: Run the workunit - compileworkunit: + compile_workunit: Legacy function to compile the workunit - createworkunit: + create_workunit: Legacy function to create the workunit - WUWaitCompiled: + wu_wait_compiled: Legacy function to wait for the workunit to compile - WUWaitComplete: + wu_wait_complete: Legacy function to wait for the workunit to complete - runworkunit: + run_workunit: Legacy function to run the workunit """ @@ -775,21 +777,21 @@ def __init__(self, hpcc, cluster1="", cluster2=""): self.cluster2 = cluster2 self.stateid = conf.WORKUNIT_STATE_MAP - def write_file(self, querytext, folder, jobname): + def write_file(self, query_text, folder, job_name): """Write a .ecl file to disk Parameters ---------- - querytext: + query_text: The ecl query to write folder: The folder to write the file to - jobname: + job_name: The name of the ecl file Returns ------- - filename: + file_name: The name of the ecl file written Raises @@ -798,31 +800,31 @@ def write_file(self, querytext, folder, jobname): A generic exception """ try: - words = jobname.split() - jobname = "_".join(words) - filename = os.path.join(folder, jobname + ".ecl") - f = open(filename, "w") - f.write(querytext) + words = job_name.split() + job_name = "_".join(words) + file_name = os.path.join(folder, job_name + ".ecl") + f = open(file_name, "w") + f.write(query_text) f.close - return filename + return file_name except Exception as e: raise HPCCException("Could not write file: " + str(e)) - def get_bashcommand(self, filename, repository): + def get_bash_command(self, file_name, repository): """Get the bash command to compile the ecl file Parameters ---------- - filename: + file_name: The name of the ecl file repository: Git repository to use Returns ------- - bashcommand: + bash_command: The bash command to compile the ecl file - outputfile: + output_file: The name of the compiled ecl file - filename.eclxml Raises @@ -831,15 +833,15 @@ def get_bashcommand(self, filename, repository): A generic exception """ try: - outputfile = utils.create_compile_file_name(filename) - bashcommand = utils.create_compile_bash_command( - repository, outputfile, filename + output_file = utils.create_compile_file_name(file_name) + bash_command = utils.create_compile_bash_command( + repository, output_file, file_name ) - return bashcommand, outputfile + return bash_command, output_file except Exception as e: raise HPCCException("Could not get bash command: " + str(e)) - def 
get_workload(self): + def get_work_load(self): """Get the workload for the given two HPCC clusters Parameters @@ -860,7 +862,7 @@ def get_workload(self): try: payload = {"SortBy": "Name", "Descending": 1} - resp = self.hpcc.Activity(**payload).json() + resp = self.hpcc.activity(**payload).json() len1 = 0 len2 = 0 if "Running" in list(resp["ActivityResponse"].keys()): @@ -876,21 +878,21 @@ def get_workload(self): except Exception as e: raise HPCCException("Could not get workload: " + str(e)) - def create_filename(self, QueryText, working_folder, Jobname): + def create_file_name(self, query_text, working_folder, job_name): """Create a filename for the ecl file Parameters ---------- - QueryText: + query_text: The ecl query working_folder: The folder to write the file to - Jobname: + job_name: The name of the ecl file Returns ------- - filename: + file_name: The name of the ecl file Raises @@ -899,26 +901,26 @@ def create_filename(self, QueryText, working_folder, Jobname): A generic exception """ try: - self.Jobname = Jobname - return self.write_file(QueryText, working_folder, Jobname) + self.job_name = job_name + return self.write_file(query_text, working_folder, job_name) except Exception as e: - raise HPCCException("Could not create filename: " + str(e)) + raise HPCCException("Could not create file name: " + str(e)) - def bash_compile(self, filename, gitrepository): + def bash_compile(self, file_name, git_repository): """Compile the ecl file Parameters ---------- - filename: + file_name: The name of the ecl file - gitrepository: + git_repository: Git repository to use Returns ------- output: The output from the bash command - outputfile: + output_file: The name of the compiled ecl file - filename.eclxml Raises @@ -927,21 +929,21 @@ def bash_compile(self, filename, gitrepository): A generic exception """ try: - bashcommand, outputfile = self.get_bashcommand(filename, gitrepository) + bash_command, output_file = self.get_bash_command(file_name, git_repository) process = subprocess.Popen( - bashcommand.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT + bash_command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) output, error = process.communicate() - return output, outputfile + return output, output_file except Exception as e: raise HPCCException("Could not compile: " + str(e)) - def bash_run(self, compiledfile, cluster): + def bash_run(self, compiled_file, cluster): """Run the compiled ecl file Parameters ---------- - compiledfile: + compiled_file: The name of the compiled ecl file cluster: The HPCC cluster to run the query on @@ -959,24 +961,24 @@ def bash_run(self, compiledfile, cluster): try: # Select the cluster to run the query on if cluster == "": - len1, len2 = self.get_workload() + len1, len2 = self.get_work_load() if len2 > len1: cluster = self.cluster1 else: cluster = self.cluster2 - self.Jobname = self.Jobname.replace(" ", "_") - bashcommand = utils.create_run_bash_command( - compiledfile, + self.job_name = self.job_name.replace(" ", "_") + bash_command = utils.create_run_bash_command( + compiled_file, cluster, self.hpcc.auth.ip, self.hpcc.auth.port, self.hpcc.auth.oauth[0], self.hpcc.auth.oauth[1], - self.Jobname, + self.job_name, ) process = subprocess.Popen( - bashcommand.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT + bash_command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) output, error = process.communicate() @@ -984,72 +986,72 @@ def bash_run(self, compiledfile, cluster): except Exception as e: raise HPCCException("Could not 
run: " + str(e)) - def compileworkunit(self, Wuid, Cluster=""): + def compile_workunit(self, wuid, cluster=""): """Legacy function to compile a workunit - use bash_compile instead Parameters ---------- - Wuid: + wuid: The Wuid of the workunit to compile - Cluster: + cluster: The HPCC cluster to run the query on """ - if Cluster == "": - len1, len2 = self.get_workload() + if cluster == "": + len1, len2 = self.get_work_load() if len2 > len1: - Cluster = self.cluster1 + cluster = self.cluster1 else: - Cluster = self.cluster2 - self.hpcc.wuSubmit(Wuid=Wuid, Cluster=Cluster) + cluster = self.cluster2 + self.hpcc.wu_submit(Wuid=wuid, Cluster=cluster) try: - w3 = self.hpcc.WUWaitCompiled(Wuid=Wuid) + w3 = self.hpcc.wu_wait_compiled(Wuid=wuid) except requests.exceptions.Timeout: - w3 = self.WUWaitCompiled(Wuid=Wuid) + w3 = self.wu_wait_compiled(wuid=wuid) w3 = json.loads(w3.text) return w3["WUWaitResponse"]["StateID"] else: w3 = json.loads(w3.text) return w3["WUWaitResponse"]["StateID"] - def createworkunit( - self, Action, ResultLimit, QueryText, Jobname, ClusterOrig="", data="" + def create_workunit( + self, action, result_limit, query_text, job_name, cluster_orig="", data="" ): """Legacy function to create a workunit - use bash_run instead Parameters ---------- - Action: + action: The action to perform - ResultLimit: + result_limit: The number of results to return - QueryText: + query_text: The ecl query - Jobname: + job_name: The name of the ecl file - ClusterOrig: + cluster_orig: The HPCC cluster to run the query on data: The data to pass to the query """ - if ClusterOrig == "": - len1, len2 = self.get_workload() + if cluster_orig == "": + len1, len2 = self.get_work_load() if len2 > len1: - ClusterOrig = self.cluster1 + cluster_orig = self.cluster1 else: - ClusterOrig = self.cluster2 - if QueryText is None: + cluster_orig = self.cluster2 + if query_text is None: data = {"QueryText": data} kwargs = {"data": data} else: - data = {"QueryText": QueryText} + data = {"QueryText": query_text} kwargs = {"data": data} - resp = self.hpcc.wuCreateAndUpdate( - Action=Action, - ResultLimit=ResultLimit, - Jobname=Jobname, - ClusterOrig=ClusterOrig, + resp = self.hpcc.wu_create_and_update( + Action=action, + ResultLimit=result_limit, + Jobname=job_name, + ClusterOrig=cluster_orig, **kwargs, ) @@ -1065,7 +1067,7 @@ def createworkunit( else: raise ("workunit id not created") - def WUWaitCompiled(self, Wuid): + def wu_wait_compiled(self, wuid): """Legacy function to wait for a workunit to compile Parameters @@ -1075,55 +1077,55 @@ def WUWaitCompiled(self, Wuid): """ try: logging.info( - "session timeout for WUWaitCompiled, starting new session for WUWaitComplete" + "session timeout for wu_wait_compiled, starting new session for wu_wait_complete" ) - w4 = self.hpcc.WUWaitCompiled(Wuid=Wuid) + w4 = self.hpcc.wu_wait_compiled(Wuid=wuid) except requests.exceptions.Timeout: - w4 = self.WUWaitCompiled(Wuid=Wuid) + w4 = self.wu_wait_compiled(wuid=wuid) return w4 else: return w4 - def WUWaitComplete(self, Wuid): + def wu_wait_complete(self, wuid): """Legacy function to wait for a workunit to complete Parameters ---------- - Wuid: + wuid: The Wuid of the workunit to compile """ try: logging.info( - "session timeout for WuRun, starting new session for WUWaitComplete" + "session timeout for WuRun, starting new session for wu_wait_complete" ) - w4 = self.hpcc.WUWaitComplete(Wuid=Wuid) + w4 = self.hpcc.wu_wait_complete(Wuid=wuid) except requests.exceptions.Timeout: - w4 = self.WUWaitComplete(Wuid=Wuid) + w4 = 
self.wu_wait_complete(wuid=wuid) return w4 else: return w4 - def runworkunit(self, Wuid, Cluster=""): + def run_workunit(self, wuid, cluster=""): """Legacy function to run a workunit - use bash_run instead Parameters ---------- - Wuid: + wuid: The Wuid of the workunit to compile - Cluster: + cluster: The HPCC cluster to run the query on """ - if Cluster == "": - len1, len2 = self.get_workload() + if cluster == "": + len1, len2 = self.get_work_load() if len2 > len1: - Cluster = self.cluster1 + cluster = self.cluster1 else: - Cluster = self.cluster2 + cluster = self.cluster2 try: - w4 = self.hpcc.wuRun(Wuid=Wuid, Cluster=Cluster, Variables=[]) + w4 = self.hpcc.wu_run(Wuid=wuid, Cluster=cluster, Variables=[]) except requests.exceptions.Timeout: - w4 = self.WUWaitComplete(Wuid=Wuid) + w4 = self.wu_wait_complete(wuid=wuid) w4 = w4.json() return w4["WUWaitResponse"]["StateID"] @@ -1133,7 +1135,7 @@ def runworkunit(self, Wuid, Cluster=""): return self.stateid[state] -class readfileinfo(object): +class ReadFileInfo(object): """ Class to read the file information from the HPCC cluster @@ -1141,165 +1143,165 @@ class readfileinfo(object): ---------- hpcc: The hpcc object - logicalFileName: + logical_file_name: The logical file name cluster: The cluster to read the file information from - fileType: + file_type: The file type - fileSizelimit: + file_size_limit: The file size limit. Defaults to 25MB - ifExists: + if_exists: Boolean to determine if the file exists - isSuperFile: + is_superfile: Boolean to determine if the file is a superfile - actualFileSize: + actual_file_size: The actual file size - recordCount: + record_count: The number of records in the file - desprayIP: + despray_ip: The IP address to despray the file to - desprayPath: + despray_path: The path to despray the file to - desprayallowoverwrite: + despray_allow_overwrite: Boolean to determine if the file can be overwritten. Defaults to True - shouldDespray: + should_despray: Boolean to determine if the file should be desprayed. Defaults to False - checkStatus: + check_status: Boolean to determine if the file status should be checked. Defaults to False - csvSeperatorforRead: + csv_separator_for_read: The csv seperator for reading the file. Defaults to ',' - readStatus: + read_status: The read status. Defaults to 'Not Read' - desprayFromCluster: + despray_from_cluster: The cluster to despray the file from - csvHeaderFlag: + csv_header_flag: Int to determine if the file has a csv header. 
Defaults to 0

     Methods
     -------
-    checkIfFileExistsAndIsSuperFile:
+    check_if_file_exists_and_is_super_file:
         Checks if the file exists and is a superfile

-    setFilename:
+    set_file_name:
         Sets the logical file name

-    getSubFileInformation:
+    get_sub_file_information:
         Gets the subfile information

-    checkfileinDFU:
+    check_file_in_dfu:
         Checks the file in the DFU queue

-    getData:
+    get_data:
         Gets the data from the file
     """

     def __init__(
         self,
         hpcc,
-        logicalFileName,
+        logical_file_name,
         cluster,
-        fileType,
-        fileSizelimit=25,
-        ifExists=-1,
-        isSuperFile=-1,
-        actualFileSize=-1,
-        recordCount=-1,
-        desprayIP="",
-        desprayPath="",
-        desprayallowoverwrite="true",
-        shouldDespray=False,
-        checkStatus=False,
-        csvSeperatorforRead=",",
-        readStatus="Not read",
-        desprayFromCluster="",
-        csvHeaderFlag=0,
+        file_type,
+        file_size_limit=25,
+        if_exists=-1,
+        is_superfile=-1,
+        actual_file_size=-1,
+        record_count=-1,
+        despray_ip="",
+        despray_path="",
+        despray_allow_overwrite="true",
+        should_despray=False,
+        check_status=False,
+        csv_separator_for_read=",",
+        read_status="Not read",
+        despray_from_cluster="",
+        csv_header_flag=0,
     ):
-        """Constructor for the readfileinfo class"""
+        """Constructor for the ReadFileInfo class"""
         self.hpcc = hpcc
-        self.logicalFileName = logicalFileName
+        self.logical_file_name = logical_file_name
         self.cluster = cluster
-        self.fileSizelimit = fileSizelimit
-        self.fileType = fileType
-        self.ifExists = ifExists
-        self.isSuperFile = isSuperFile
-        self.actualFileSize = actualFileSize
-        self.recordCount = recordCount
-        self.desprayIP = desprayIP
-        self.desprayPath = desprayPath
-        self.desprayallowoverwrite = desprayallowoverwrite
-        self.shouldDespray = shouldDespray
-        self.checkStatus = checkStatus
-        self.csvSeperatorforRead = csvSeperatorforRead
-        self.readStatus = readStatus
-        self.desprayFromCluster = desprayFromCluster
-        self.csvHeaderFlag = csvHeaderFlag
-
-    def checkIfFileExistsAndIsSuperFile(self, clusterFromUser):
+        self.file_size_limit = file_size_limit
+        self.file_type = file_type
+        self.if_exists = if_exists
+        self.is_superfile = is_superfile
+        self.actual_file_size = actual_file_size
+        self.record_count = record_count
+        self.despray_ip = despray_ip
+        self.despray_path = despray_path
+        self.despray_allow_overwrite = despray_allow_overwrite
+        self.should_despray = should_despray
+        self.check_status = check_status
+        self.csv_separator_for_read = csv_separator_for_read
+        self.read_status = read_status
+        self.despray_from_cluster = despray_from_cluster
+        self.csv_header_flag = csv_header_flag
+
+    def check_if_file_exists_and_is_super_file(self, cluster_from_user):
         """Function to check if the file exists and is a superfile

         Parameters
         ----------
-        clusterFromUser:
+        cluster_from_user:
             The cluster to check the file on
         """
-        self.checkStatus = True
-        fileSearch = self.hpcc.fileQuery(
-            LogicalName=self.logicalFileName,
+        self.check_status = True
+        file_search = self.hpcc.file_query(
+            LogicalName=self.logical_file_name,
             LogicalFileSearchType="Logical Files and Superfiles",
         )
-        self.ifExists = utils.getfileStatus(fileSearch)
-        if self.ifExists != 0 and self.ifExists != "0":
-            arrFESF = utils.getfileType(fileSearch)
+        self.if_exists = utils.get_file_status(file_search)
+        if self.if_exists != 0 and self.if_exists != "0":
+            arrFESF = utils.get_file_type(file_search)
             self.cluster = (
                 arrFESF["NodeGroup"]
                 if arrFESF["NodeGroup"] is not None
-                else clusterFromUser
+                else cluster_from_user
             )
-            self.ifSuperFile = (
+            self.is_superfile = (
                 arrFESF["isSuperfile"] if arrFESF["isSuperfile"] is not None else ""
             )
-            self.actualFileSize = (
+            self.actual_file_size = (
                 int(arrFESF["Totalsize"].replace(",", ""))
                 if arrFESF["Totalsize"] is not None
                 else ""
             )
-            self.fileType = (
+            self.file_type = (
                 arrFESF["ContentType"]
                 if arrFESF["ContentType"] is not None
-                else self.fileType
+                else self.file_type
             )
             if bool(arrFESF):
                 if arrFESF["RecordCount"] != "":
-                    self.recordCount = (
+                    self.record_count = (
                         0
                         if arrFESF["RecordCount"] is None
                         else int(arrFESF["RecordCount"].replace(",", ""))
                     )
                 else:
-                    self.recordCount = -2
+                    self.record_count = -2
         else:
-            self.fileType = ""
-            self.ifSuperFile = ""
-            self.actualFileSize = None
-            self.recordCount = None
+            self.file_type = ""
+            self.is_superfile = ""
+            self.actual_file_size = None
+            self.record_count = None
             self.cluster = ""
-            self.readStatus = "File doesn't exist"
+            self.read_status = "File doesn't exist"

-    def setFilename(self, filename):
+    def set_file_name(self, file_name):
         """Function to set the logical file name and check if the file exists and is a superfile

         Parameters
         ----------
-        filename:
+        file_name:
             The logical file name
         """
-        self.logicalFileName = filename
-        self.checkIfFileExistsAndIsSuperFile(self.cluster)
+        self.logical_file_name = file_name
+        self.check_if_file_exists_and_is_super_file(self.cluster)

-    def getSubFileInformation(self):
+    def get_sub_file_information(self):
         """Function to get the subfile information

         Parameters
@@ -1311,15 +1313,15 @@ def getSubFileInformation(self):
         subFileInformation:
             The subfile information if the file is a superfile, else returns a message "Not a superfile"
         """
-        if not self.checkStatus:
-            self.checkIfFileExistsAndIsSuperFile(self.cluster)
-        if self.isSuperFile == 1:
-            subFileInfo = self.hpcc.getSubFileInfo(Name=self.logicalFileName)
-            return utils.getSubfileNames(subFileInfo)
+        if not self.check_status:
+            self.check_if_file_exists_and_is_super_file(self.cluster)
+        if self.is_superfile == 1:
+            sub_file_info = self.hpcc.get_subfile_info(Name=self.logical_file_name)
+            return utils.get_subfile_names(sub_file_info)
         else:
             return "Not a superfile"

-    def checkfileinDFU(self):
+    def check_file_in_dfu(self):
         """Function to check if the file exists in the DFU queue

         Parameters
@@ -1331,14 +1333,14 @@ def checkfileinDFU(self):
         dfuFileStatus:
             A boolean to determine if the file exists in the DFU queue
         """
-        statusDetails = self.hpcc.checkFileExists(Name=self.logicalFileName)
-        status = utils.checkfileexistence(statusDetails)
+        status_details = self.hpcc.check_file_exists(Name=self.logical_file_name)
+        status = utils.check_file_existence(status_details)
         if status == 0:
             return False
         else:
             return True

-    def getData(self):
+    def get_data(self):
         """Function to get the data from the file

         Parameters
@@ -1350,57 +1352,59 @@ def getData(self):
         data:
             The data from the file
         """
-        self.checkIfFileExistsAndIsSuperFile(self.cluster)
-        if self.ifExists != 0 and self.ifExists != "0":
-            filesizeinMB = (self.actualFileSize / 1024) / 1024
+        self.check_if_file_exists_and_is_super_file(self.cluster)
+        if self.if_exists != 0 and self.if_exists != "0":
+            file_size_in_mb = (self.actual_file_size / 1024) / 1024
             if (
-                filesizeinMB > self.fileSizelimit
-                or self.fileType == "xml"
-                or self.shouldDespray
+                file_size_in_mb > self.file_size_limit
+                or self.file_type == "xml"
+                or self.should_despray
             ):
-                if self.desprayIP != "" and self.desprayPath != "":
-                    QueryString = (
+                if self.despray_ip != "" and self.despray_path != "":
+                    query_string = (
                         "IMPORT STD; STD.file.despray(~'"
-                    + self.logicalFileName
+                    + 
self.logical_file_name + "','" - + self.desprayIP + + self.despray_ip + "','" - + self.desprayPath + + self.despray_path + "',,,," - + self.desprayallowoverwrite + + self.despray_allow_overwrite + ");" ) - clusterfrom = "" - if self.desprayFromCluster == "": - clusterfrom = self.cluster + cluster_from = "" + if self.despray_from_cluster == "": + cluster_from = self.cluster else: - clusterfrom = self.desprayFromCluster + cluster_from = self.despray_from_cluster setattr(self.hpcc, "response_type", ".json") - self.readStatus = utils.desprayfile( + self.read_status = utils.despray_file( self.hpcc, - QueryString, - clusterfrom, - "Despraying : " + self.logicalFileName, + query_string, + cluster_from, + "Despraying : " + self.logical_file_name, ) else: - self.readStatus = "Unable to despray with the given input values. Please provide values for despray IP and folder" + self.read_status = "Unable to despray with the given input values. Please provide values for despray IP and folder" else: - if self.recordCount == -2: - countupdated = 9223372036854775807 + if self.record_count == -2: + count_updated = 9223372036854775807 else: - countupdated = self.recordCount - flatcsvresp = self.hpcc.getFileInfo( - LogicalName=self.logicalFileName, + count_updated = self.record_count + flat_csv_resp = self.hpcc.get_file_info( + LogicalName=self.logical_file_name, Cluster=self.cluster, - Count=countupdated, + Count=count_updated, ) - if self.fileType == "flat": - self.readStatus = "Read" - return utils.getflatdata(flatcsvresp) + if self.file_type == "flat": + self.read_status = "Read" + return utils.get_flat_data(flat_csv_resp) else: - self.readStatus = "Read" - return utils.getcsvdata( - flatcsvresp, self.csvSeperatorforRead, self.csvHeaderFlag + self.read_status = "Read" + return utils.get_csv_data( + flat_csv_resp, + self.csv_separator_for_read, + self.csv_header_flag, ) else: - self.readStatus = "File doesn't exist" + self.read_status = "File doesn't exist" diff --git a/pyhpcc/roxie_binder.py b/pyhpcc/roxie_binder.py index 9ad5d15..d4aed74 100644 --- a/pyhpcc/roxie_binder.py +++ b/pyhpcc/roxie_binder.py @@ -122,13 +122,13 @@ def execute(self): # Build the request URL full_url = ( self.api.auth.get_url() - + self.api.auth.pathDelimiter + + self.api.auth.path_delimiter + self.api.definition - + self.api.auth.pathDelimiter + + self.api.auth.path_delimiter + self.api.roxie_port - + self.api.auth.pathDelimiter - + self.api.searchservice - + self.api.auth.pathDelimiter + + self.api.auth.path_delimiter + + self.api.search_service + + self.api.auth.path_delimiter + "." + self.response_type ) diff --git a/pyhpcc/utils.py b/pyhpcc/utils.py index 1c72b07..7717070 100644 --- a/pyhpcc/utils.py +++ b/pyhpcc/utils.py @@ -1,7 +1,8 @@ -import six +import sys import xml.etree.ElementTree as ET + import pandas as pd -import sys +import six if sys.version_info[0] < 3: from StringIO import StringIO @@ -48,7 +49,7 @@ def convert_arg_to_utf8_str(arg): raise e -def create_compile_bash_command(repository, outputfile, filename): +def create_compile_bash_command(repository, output_file, file_name): """ Create a bash command to compile a file. @@ -56,9 +57,9 @@ def create_compile_bash_command(repository, outputfile, filename): ---------- repository : str The repository to compile the file from. - outputfile : str + output_file : str The output file to write the compiled code to. - filename : str + file_name : str The filename to compile. 
Returns @@ -73,19 +74,19 @@ def create_compile_bash_command(repository, outputfile, filename): """ try: return """ eclcc -legacy -I {0} -platform=thor -E -o {1} {2} -wu""".format( - repository, outputfile, filename + repository, output_file, file_name ) except HPCCException as e: raise e -def create_compile_file_name(filename): +def create_compile_file_name(file_name): """ Create a compiled file name from a filename. Parameters ---------- - filename : str + file_name : str The ecl filename to create a compiled file name from. Returns @@ -99,20 +100,20 @@ def create_compile_file_name(filename): A generic exception. """ try: - return filename.split(".")[0] + ".eclxml" + return file_name.split(".")[0] + ".eclxml" except HPCCException as e: raise e def create_run_bash_command( - compiledfile, cluster, ip, port, username, password, jobname + compiled_file, cluster, ip, port, user_name, password, job_name ): """ Create a bash command to run a compiled file. Parameters ---------- - compiledfile : str + compiled_file : str The compiled file to run. cluster : str The cluster to run the compiled file on. @@ -120,11 +121,11 @@ def create_run_bash_command( The ip address of the HPCC cluster. port : str The port of the HPCC cluster. - username : str + user_name : str The username to use to connect to the HPCC cluster. password : str The password to use to connect to the HPCC cluster. - jobname : str + job_name : str The name of the job to run. Returns @@ -139,7 +140,7 @@ def create_run_bash_command( """ try: return """ecl run {0} --limit=100 --wait=0 --target={1} --server={2} --ssl --port={3} -u={4} -pw={5} --name={6} -v""".format( - compiledfile, cluster, ip, port, username, password, jobname + compiled_file, cluster, ip, port, user_name, password, job_name ) except HPCCException as e: raise e @@ -188,7 +189,7 @@ def get_graph_skew(response): raise e -def getfileStatus(arg): +def get_file_status(arg): """ Parses the xml response to get NumFiles information @@ -217,7 +218,7 @@ def getfileStatus(arg): raise e -def getfileType(arg): +def get_file_type(arg): """ Parses the xml response to get FileType information @@ -243,9 +244,9 @@ def getfileType(arg): # Loop through the xml and get the FileType information for child in argET: if child.tag == "DFULogicalFiles": - for dfufile in child: - if dfufile.tag == "DFULogicalFile": - for fileinfo in dfufile: + for dfu_file in child: + if dfu_file.tag == "DFULogicalFile": + for fileinfo in dfu_file: if fileinfo.tag == "NodeGroup": data_dict.update({fileinfo.tag: fileinfo.text}) if fileinfo.tag == "isSuperfile": @@ -261,7 +262,7 @@ def getfileType(arg): raise e -def getSubfileNames(arg): +def get_subfile_names(arg): """ Parses the xml response to get SubfileNames information @@ -282,20 +283,20 @@ def getSubfileNames(arg): """ try: argET = ET.fromstring(arg.content) - subfilenamelist = [] + subfile_name_list = [] for child in argET: if child.tag == "DFUInfoResponse": - for dfufile in child: - if dfufile.tag == "subfiles": - for fileinfo in dfufile: - if fileinfo.tag == "Item": - subfilenamelist.append(fileinfo.text) - return pd.Series(subfilenamelist) + for dfu_file in child: + if dfu_file.tag == "subfiles": + for file_info in dfu_file: + if file_info.tag == "Item": + subfile_name_list.append(file_info.text) + return pd.Series(subfile_name_list) except HPCCException as e: raise e -def getflatdata(arg): +def get_flat_data(arg): """ Parses the xml response to get flat data @@ -323,15 +324,15 @@ def getflatdata(arg): if child.tag == "Dataset": for row in child: 
data_dict = {} - for rowdata in row: - data_dict[rowdata.tag] = rowdata.text + for row_data in row: + data_dict[row_data.tag] = row_data.text list_dict.append(data_dict) return pd.DataFrame(list_dict) except HPCCException as e: raise e -def getcsvdata(arg, csvSeperator, csvHeader): +def get_csv_data(arg, csv_separator, csv_header): """ Parses the xml response to get csv data @@ -339,9 +340,9 @@ def getcsvdata(arg, csvSeperator, csvHeader): ---------- arg : str The xml response - csvSeperator : str + csv_separator : str The csv seperator - csvHeader : str + csv_header : str The csv header Returns @@ -363,27 +364,27 @@ def getcsvdata(arg, csvSeperator, csvHeader): if child.tag == "Dataset": for row in child: data_dict = {} - for rowdata in row: - data_dict[rowdata.tag] = rowdata.text + "\n" + for row_data in row: + data_dict[row_data.tag] = row_data.text + "\n" list_dict.append(data_dict) - xmlstr = "" - for eachdict in list_dict: - for key, value in list(eachdict.items()): + xml_str = "" + for each_dict in list_dict: + for key, value in list(each_dict.items()): if key == "line": if "None" in str(value): - xmlstr = xmlstr + xml_str = xml_str else: - xmlstr = xmlstr + str(value) - csvformatdata = StringIO(xmlstr) - if csvHeader == 0: - return pd.read_csv(csvformatdata, sep=csvSeperator, header=csvHeader) + xml_str = xml_str + str(value) + csv_format_data = StringIO(xml_str) + if csv_header == 0: + return pd.read_csv(csv_format_data, sep=csv_separator, header=csv_header) else: - return pd.read_csv(csvformatdata, sep=csvSeperator, header=None) + return pd.read_csv(csv_format_data, sep=csv_separator, header=None) except HPCCException as e: raise e -def checkfileexistence(arg): +def check_file_existence(arg): """ Parses the xml response to check if the file exists @@ -436,7 +437,7 @@ def checkfileexistence(arg): raise e -def desprayfile(hpcc, querytext, Cluster, jobn): +def despray_file(hpcc, query_text, cluster, jobn): """ ToDo: Desprays a file from the HPCC cluster to the local machine @@ -444,9 +445,9 @@ def desprayfile(hpcc, querytext, Cluster, jobn): ---------- hpcc : hpcc.HPCCConnection The HPCC connection object - querytext : str + query_text : str The query text - Cluster : str + cluster : str The cluster name jobn : str The job name @@ -464,9 +465,9 @@ def desprayfile(hpcc, querytext, Cluster, jobn): try: if hpcc is None: raise HPCCException("HPCC Connection is not established") - if querytext is None: + if query_text is None: raise HPCCException("Query text is not provided") - if Cluster is None: + if cluster is None: raise HPCCException("Cluster name is not provided") if jobn is None: raise HPCCException("Job name is not provided") diff --git a/tests/test_hpcc_api.py b/tests/test_hpcc_api.py index fdf9cd1..f05ba08 100644 --- a/tests/test_hpcc_api.py +++ b/tests/test_hpcc_api.py @@ -1,13 +1,13 @@ # Unit tests to test the authentication module -import os # used by test_UploadFile +import os # used by test_upload_file import unittest -from datetime import datetime # used by test_UploadFile +from datetime import datetime # used by test_upload_file import config from pyhpcc.auth import Auth from pyhpcc.errors import HPCCException -from pyhpcc.models import hpcc +from pyhpcc.models import HPCC class TestHPCCAPI(unittest.TestCase): @@ -21,63 +21,63 @@ class TestHPCCAPI(unittest.TestCase): DUMMY_HPCC_PORT = config.DUMMY_HPCC_PORT AUTH_OBJ = Auth(HPCC_HOST, HPCC_PORT, HPCC_USERNAME, HPCC_PASSWORD, True, "https") - HPCC_OBJ = hpcc(AUTH_OBJ) + HPCC_OBJ = HPCC(AUTH_OBJ) - # Used by 
test_AddtoSuperfileRequest, test_getSubFileInfoall
+    # Used by test_add_to_superfile_request, test_get_subfile_info
     # Generic superfile created for PyHPCC testing purposes on Boca Dataland
     SUPER_FILE_NAME = ""  # TODO: enter super file name

-    # Used by test_getdfuInfo, test_getFileInfo, test_checkFileExists, test_AddtoSuperfileRequest, test_fileQuery
+    # Used by test_get_dfu_info, test_get_file_info, test_check_file_exists, test_add_to_superfile_request, test_file_query
     # Generic logical file created for PyHPCC testing purposes on Boca Dataland
     LOGICAL_FILE_NAME = ""  # TODO: enter logical file name

-    # Used by test_get_wuinfo, test_get_wuresult
+    # Used by test_get_wu_info, test_get_wu_result
     # No particular reason why this wu was chosen. WU is on Boca Dataland. May have to be updated since it may get archived.
     STATIC_WUID = ""  # TODO: enter workunit id

-    # Used by test_wu_Submit, test_WUWaitCompiled, test_wuRun, test_WUUpdate
+    # Used by test_wu_submit, test_wu_wait_compiled, test_wu_run, test_wu_update
     # No particular reason why this wu was chosen. WU is on Boca Dataland. May have to be updated since it may get archived.
     STATIC_WUID2 = ""  # TODO: enter workunit id

-    # Used by test_WUWaitComplete
-    # Need to choose a workunit that is in the compile or run state, although any wuid will work for testing WUWaitComplete.
+    # Used by test_wu_wait_complete
+    # Need to choose a workunit that is in the compile or run state, although any wuid will work for testing wu_wait_complete.
     # For state=unknown or state=paused, the wait will be infinite if the state is not updated for wu completion.
     STATIC_WUID3 = ""  # TODO: enter workunit id

-    # Used by test_getgraph
+    # Used by test_get_graph
     STATIC_WUID4 = ""  # TODO: enter workunit id

-    # Used by test_get_wuquery
+    # Used by test_get_wu_query
    STATIC_WUID5 = ""  # TODO: enter workunit id

-    # Used by test_getDfuWorkunitInfo
+    # Used by test_get_dfu_workunit_info
     # No particular reason why this wu was chosen. WU is on Boca Dataland. May have to be updated since it may get archived.
     STATIC_DFU_WUID = ""  # TODO: enter DFU workunit id

-    # Used by test_UploadFile
+    # Used by test_upload_file
     # Points to bctlpedata12 landing zone. Used since this is a familiar landing zone.
     LANDING_ZONE_IP = ""  # TODO: Update with landing zone IP
     LANDING_ZONE_PATH = ""  # TODO: Update with landing zone path

-    # Used by test_sprayFixed, test_sprayVariable
+    # Used by test_spray_fixed, test_spray_variable
     SPRAY_CLUSTER = ""  # TODO: Update with cluster name

-    # Used by test_sprayFixed
+    # Used by test_spray_fixed
     SPRAY_FIXED_SOURCE_FILE = ""  # TODO: Update with fixed spray source file path
     SPRAY_FIXED_DEST_FILE = ""  # TODO: Update with fixed spray destination file name

-    # Used by test_sprayVariable
+    # Used by test_spray_variable
     SPRAY_VARIABLE_SOURCE_FILE = ""  # TODO: Update with variable spray source file path
     SPRAY_VARIABLE_DEST_FILE = (
         ""  # TODO: Update with variable spray destination file name
     )

-    # Used by test_downloadfile
+    # Used by test_download_file
     DOWNLOAD_FILE_NAME = ""  # TODO: Update with download file name

-    def test_Activity(self):
+    def test_activity(self):
         payload = {"SortBy": "Name", "Descending": 1}
-        response = self.HPCC_OBJ.Activity(**payload).json()
+        response = self.HPCC_OBJ.activity(**payload).json()

         if "Exceptions" in response:
             raise HPCCException(
@@ -90,13 +90,13 @@ def test_Activity(self):

         # workunits contains a list of dictionaries. Testing for an empty or populated list.
self.assertGreaterEqual(len(workunits_list), 0) - def test_fileList(self): + def test_file_list(self): payload = { "Netaddr": "", # TODO: Update with landing zone IP "Path": "/data", "OS": 2, } - response = self.HPCC_OBJ.fileList(**payload).json() + response = self.HPCC_OBJ.file_list(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -109,10 +109,10 @@ def test_fileList(self): # workunits contains a list of dictionaries. Testing for an empty or populated list. self.assertGreaterEqual(len(files_list), 0) - def test_dfuQuery(self): + def test_dfu_query(self): first_n_files = 10 payload = {"FirstN": first_n_files} - response = self.HPCC_OBJ.dfuQuery(**payload).json() + response = self.HPCC_OBJ.dfu_query(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -128,9 +128,9 @@ def test_dfuQuery(self): ] self.assertEqual(len(dfu_files_list), first_n_files) - def test_getdfuInfo(self): + def test_get_dfu_info(self): payload = {"Name": self.LOGICAL_FILE_NAME} - response = self.HPCC_OBJ.getdfuInfo(**payload).json() + response = self.HPCC_OBJ.get_dfu_info(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -140,10 +140,10 @@ def test_getdfuInfo(self): self.LOGICAL_FILE_NAME, response["DFUInfoResponse"]["FileDetail"]["Name"] ) - def test_wuQuery(self): + def test_wu_query(self): page_size = 5 payload = {"PageSize": page_size} - response = self.HPCC_OBJ.wuQuery(**payload).json() + response = self.HPCC_OBJ.wu_query(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -155,9 +155,9 @@ def test_wuQuery(self): wu_list = response["WUQueryResponse"]["Workunits"]["ECLWorkunit"] self.assertEqual(len(wu_list), page_size) - def test_get_wuinfo(self): + def test_get_wu_info(self): payload = {"Wuid": self.STATIC_WUID} - response = self.HPCC_OBJ.get_wuinfo(**payload).json() + response = self.HPCC_OBJ.get_wu_info(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -167,10 +167,10 @@ def test_get_wuinfo(self): self.STATIC_WUID, response["WUInfoResponse"]["Workunit"]["Wuid"] ) - def test_getDfuWorkunits(self): + def test_get_dfu_workunits(self): page_size = 5 payload = {"PageSize": page_size} - response = self.HPCC_OBJ.getDfuWorkunits(**payload).json() + response = self.HPCC_OBJ.get_dfu_workunits(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -182,9 +182,9 @@ def test_getDfuWorkunits(self): dfu_wu_list = response["GetDFUWorkunitsResponse"]["results"]["DFUWorkunit"] self.assertEqual(len(dfu_wu_list), page_size) - def test_getDfuWorkunitInfo(self): + def test_get_dfu_workunit_info(self): payload = {"wuid": self.STATIC_DFU_WUID} - response = self.HPCC_OBJ.getDfuWorkunitInfo(**payload).json() + response = self.HPCC_OBJ.get_dfu_workunit_info(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -194,9 +194,9 @@ def test_getDfuWorkunitInfo(self): self.STATIC_DFU_WUID, response["GetDFUWorkunitResponse"]["result"]["ID"] ) - def test_get_wuresult(self): + def test_get_wu_result(self): payload = {"Wuid": self.STATIC_WUID, "Sequence": 0} - response = self.HPCC_OBJ.get_wuresult(**payload).json() + response = self.HPCC_OBJ.get_wu_result(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -204,9 +204,9 @@ def test_get_wuresult(self): ) self.assertEqual("Rules", response["WUResultResponse"]["Name"]) - def test_getFileInfo(self): + def test_get_file_info(self): payload = {"LogicalName": self.LOGICAL_FILE_NAME} - response = self.HPCC_OBJ.getFileInfo(**payload).json() + response = 
self.HPCC_OBJ.get_file_info(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -216,10 +216,10 @@ def test_getFileInfo(self): record_count = 100 self.assertEqual(record_count, response["WUResultResponse"]["Count"]) - def test_TpClusterInfo(self): + def test_tp_cluster_info(self): cluster_name = "" # TODO: Update with cluster name payload = {"Name": cluster_name} - response = self.HPCC_OBJ.TpClusterInfo(**payload).json() + response = self.HPCC_OBJ.tp_cluster_info(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -227,9 +227,9 @@ def test_TpClusterInfo(self): ) self.assertEqual(cluster_name, response["TpClusterInfoResponse"]["Name"]) - def test_getSubFileInfo(self): + def test_get_subfile_info(self): payload = {"Name": self.SUPER_FILE_NAME} - response = self.HPCC_OBJ.getSubFileInfo(**payload).json() + response = self.HPCC_OBJ.get_subfile_info(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -244,9 +244,9 @@ def test_getSubFileInfo(self): len(response["DFUInfoResponse"]["FileDetail"]["subfiles"]["Item"]), 0 ) - def test_checkFileExists(self): + def test_check_file_exists(self): payload = {"LogicalName": self.LOGICAL_FILE_NAME} - response = self.HPCC_OBJ.checkFileExists(**payload).json() + response = self.HPCC_OBJ.check_file_exists(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -257,7 +257,7 @@ def test_checkFileExists(self): # A file can exist which will result in a number > 0, or not exist which makes the number of files 0. self.assertGreaterEqual(response["DFUQueryResponse"]["NumFiles"], 0) - def test_wuCreateAndUpdate(self): + def test_wu_create_and_update(self): cluster = "" # TODO: Update with cluster name job_name = "PyHPCC Test Case" payload = { @@ -267,7 +267,7 @@ def test_wuCreateAndUpdate(self): "ResultLimit": 100, "ClusterOrig": cluster, } - response = self.HPCC_OBJ.wuCreateAndUpdate(**payload).json() + response = self.HPCC_OBJ.wu_create_and_update(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -279,12 +279,12 @@ def test_wuCreateAndUpdate(self): job_name, response["WUUpdateResponse"]["Workunit"]["Jobname"] ) - # !!!IMPORTANT: Add to documentation: wu_Submit compiles the workunit created in previous test case - def test_wu_Submit(self): + # !!!IMPORTANT: Add to documentation: wu_submit compiles the workunit created in previous test case + def test_wu_submit(self): # we need to retrieve the wuid created in the previous test case in this function to submit wu to cluster cluster = "" # TODO: Update with cluster name payload = {"Wuid": self.STATIC_WUID2, "Cluster": cluster} - response = self.HPCC_OBJ.wuSubmit(**payload).json() + response = self.HPCC_OBJ.wu_submit(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -293,9 +293,9 @@ def test_wu_Submit(self): self.assertIn("WUSubmitResponse", response) - def test_WUWaitCompiled(self): + def test_wu_wait_compiled(self): payload = {"Wuid": self.STATIC_WUID2} - response = self.HPCC_OBJ.WUWaitCompiled(**payload).json() + response = self.HPCC_OBJ.wu_wait_compiled(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -305,11 +305,11 @@ def test_WUWaitCompiled(self): self.assertIn("WUWaitResponse", response) # !!!IMPORTANT: Add to documentation: wuRun can only be used on compiled workunit ids - def test_wuRun(self): + def test_wu_run(self): # we need to retrieve the wuid created in the previous test case in this function to submit wu to cluster cluster = "" # TODO: Update with cluster name payload = 
{"Wuid": self.STATIC_WUID2, "Cluster": cluster} - response = self.HPCC_OBJ.wuRun(**payload).json() + response = self.HPCC_OBJ.wu_run(**payload).json() # !!!! may need to use the wait API call before asserting @@ -321,7 +321,7 @@ def test_wuRun(self): self.assertIn(self.STATIC_WUID2, response["WURunResponse"]["Wuid"]) self.assertIn("Hello World", response["WURunResponse"]["Results"]) - def test_WUUpdate(self): + def test_wu_update(self): state = 0 payload = {"Wuid": self.STATIC_WUID2, "State": state} response = self.HPCC_OBJ.WUUpdate(**payload).json() @@ -333,9 +333,9 @@ def test_WUUpdate(self): self.assertEqual(response["WUUpdateResponse"]["Workunit"]["StateID"], state) - def test_WUWaitComplete(self): + def test_wu_wait_complete(self): payload = {"Wuid": self.STATIC_WUID3} - response = self.HPCC_OBJ.WUWaitComplete(**payload).json() + response = self.HPCC_OBJ.wu_wait_complete(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -345,7 +345,7 @@ def test_WUWaitComplete(self): # states can be between 0 and 17(inclusive). See config.py file for wu state id mapping. self.assertIn(response["WUWaitResponse"]["StateID"], list(range(18))) - def test_UploadFile(self): + def test_upload_file(self): current_directory = os.getcwd() file_name = os.path.join( current_directory, "tests", "test_files", "pyhpcc_spray_fixed.csv" @@ -371,7 +371,7 @@ def test_UploadFile(self): "Path": self.LANDING_ZONE_PATH, "files": files, } - response = self.HPCC_OBJ.UploadFile(**payload).json() + response = self.HPCC_OBJ.upload_file(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -386,7 +386,7 @@ def test_UploadFile(self): ) # OBSERVATION: a wu is created and runs successfully, but there are no contents. Check why the contents do not get listed under the content section. - def test_sprayFixed(self): + def test_spray_fixed(self): payload = { "sourceIP": self.LANDING_ZONE_IP, "sourcePath": self.SPRAY_FIXED_SOURCE_FILE, @@ -398,7 +398,7 @@ def test_sprayFixed(self): "compress": "false", "failIfNoSourceFile": "true", } - response = self.HPCC_OBJ.sprayFixed(**payload).json() + response = self.HPCC_OBJ.spray_fixed(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -408,7 +408,7 @@ def test_sprayFixed(self): # A DFU workunit is created when spraying the file. Response returns this wuid. self.assertNotEqual(response["SprayFixedResponse"]["wuid"], "") - def test_sprayVariable(self): + def test_spray_variable(self): payload = { "sourceIP": self.LANDING_ZONE_IP, "sourcePath": self.SPRAY_VARIABLE_SOURCE_FILE, @@ -421,7 +421,7 @@ def test_sprayVariable(self): "compress": "false", "failIfNoSourceFile": "true", } - response = self.HPCC_OBJ.sprayVariable(**payload).json() + response = self.HPCC_OBJ.spray_variable(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -432,9 +432,9 @@ def test_sprayVariable(self): self.assertNotEqual(response["SprayResponse"]["wuid"], "") # This test case will fail if the workunit id referred to is archived. 
- def test_getgraph(self): + def test_get_graph(self): payload = {"Wuid": self.STATIC_WUID4, "GraphName": "graph1"} - response = self.HPCC_OBJ.getgraph(**payload).json() + response = self.HPCC_OBJ.get_graph(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -447,9 +447,9 @@ def test_getgraph(self): "RulesFile", ) - def test_get_wuquery(self): + def test_get_wu_query(self): payload = {"Wuid": self.STATIC_WUID5} - response = self.HPCC_OBJ.get_wuquery(**payload).json() + response = self.HPCC_OBJ.get_wu_query(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -462,9 +462,9 @@ def test_get_wuquery(self): self.HPCC_USERNAME, ) - def test_fileQuery(self): + def test_file_query(self): payload = {"LogicalName": self.LOGICAL_FILE_NAME} - response = self.HPCC_OBJ.fileQuery(**payload).json() + response = self.HPCC_OBJ.file_query(**payload).json() if "Exceptions" in response: raise HPCCException( @@ -479,14 +479,14 @@ def test_fileQuery(self): self.LOGICAL_FILE_NAME, ) - def test_downloadfile(self): + def test_download_file(self): payload = { "Name": self.DOWNLOAD_FILE_NAME, "NetAddress": self.LANDING_ZONE_IP, "Path": self.LANDING_ZONE_PATH, "OS": 2, } - response = self.HPCC_OBJ.downloadfile(**payload) + response = self.HPCC_OBJ.download_file(**payload) if "Exceptions" in response: raise HPCCException( @@ -498,12 +498,12 @@ def test_downloadfile(self): self.assertEqual(response.status_code, 200) # !!! This test case will be uncommented for use once the remove subfile from a superfile feature API method is available - # def test_AddtoSuperfileRequest(self): + # def test_add_to_superfile_request(self): # payload = { 'Superfile': self.SUPER_FILE_NAME, # 'ExistingFile': 1, # 'names_i0': self.LOGICAL_FILE_NAME # } - # response = self.HPCC_OBJ.AddtoSuperfileRequest(**payload).json() + # response = self.HPCC_OBJ.add_to_superfile_request(**payload).json() # if 'Exceptions' in response: # raise HPCCException(message=response['Exceptions']['Exception'][0]['Message'])
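
Example (not part of the diff above): reading a logical file through the renamed ReadFileInfo API. A minimal sketch assuming only the snake_case names introduced in this patch; the host, port, credentials, cluster, and logical file name are hypothetical placeholders, not values from the repository.

    from pyhpcc.auth import Auth
    from pyhpcc.models import HPCC, ReadFileInfo

    # Hypothetical connection details -- substitute your own environment
    auth_object = Auth(
        "myuniversity.hpccsystems.io",
        "8010",
        "username",
        "password",
        require_auth=True,
        protocol="https",
    )
    hpcc_object = HPCC(auth=auth_object)

    reader = ReadFileInfo(
        hpcc=hpcc_object,
        logical_file_name="~thor::example::people",  # hypothetical logical file
        cluster="thor",                              # hypothetical cluster
        file_type="flat",
    )

    if reader.check_file_in_dfu():    # True when the logical file exists in DFU
        data = reader.get_data()      # pandas DataFrame for flat/csv files
        print(reader.read_status)     # "Read" on success

Note that, per the get_data logic in the hunks above, files larger than file_size_limit, xml files, or calls with should_despray set are desprayed (when despray_ip and despray_path are provided) instead of being returned as a DataFrame.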
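Example (not part of the diff above): the legacy workunit helpers after the rename. A sketch under the assumption that create_workunit returns the new workunit id (its response parsing falls between the hunks shown); the clusters, action value, and query are illustrative only, and the docstrings recommend bash_compile/bash_run over this path.

    from pyhpcc.auth import Auth
    from pyhpcc.models import HPCC, WorkunitSubmit

    # Hypothetical connection details
    auth_object = Auth(
        "myuniversity.hpccsystems.io", "8010", "username", "password",
        require_auth=True, protocol="https",
    )
    hpcc_object = HPCC(auth=auth_object)

    # Two candidate clusters; get_work_load() steers new work to the less busy one
    ws = WorkunitSubmit(hpcc_object, "thor", "thor_dev")

    wuid = ws.create_workunit(
        action=1,                  # illustrative action value
        result_limit=100,
        query_text="OUTPUT('Hello World');",
        job_name="pep8 rename demo",
    )
    state = ws.compile_workunit(wuid=wuid)  # submits the wu, waits for compilation
    state = ws.run_workunit(wuid=wuid)      # runs it and returns a workunit state id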