Skip to content

Commit

Permalink
converting "test types" to "test environments"
Browse files Browse the repository at this point in the history
  • Loading branch information
PhillipsOwen committed Mar 19, 2024
1 parent c71a059 commit 724c1c4
Show file tree
Hide file tree
Showing 2 changed files with 56 additions and 70 deletions.
4 changes: 2 additions & 2 deletions src/common/pg_impl.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,15 +45,15 @@ def __del__(self):
# clean up connections and cursors
PGUtilsMultiConnect.__del__(self)

def get_test_type_names(self):
def get_environment_type_names(self):
"""
    gets the environment types
:return:
"""

# create the sql
sql: str = "SELECT public.get_test_type_names_json();"
sql: str = "SELECT public.get_environment_type_names_json();"

# get the data
ret_val = self.exec_sql('irods-sv', sql)
Expand Down
122 changes: 54 additions & 68 deletions src/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,8 +107,8 @@ async def get_sv_component_versions() -> json:
return JSONResponse(content=ret_val, status_code=status_code, media_type="application/json")


@APP.get('/get_test_type_names', dependencies=[Depends(JWTBearer(security))], status_code=200, response_model=None)
async def get_test_type_names() -> json:
@APP.get('/get_environment_type_names', dependencies=[Depends(JWTBearer(security))], status_code=200, response_model=None)
async def get_environment_type_names() -> json:
"""
    Returns the distinct environment types.
Expand All @@ -120,15 +120,15 @@ async def get_test_type_names() -> json:

try:
# try to make the call for records
ret_val = db_info.get_test_type_names()
ret_val = db_info.get_environment_type_names()

# was there an error?
if ret_val == -1:
ret_val = {'Warning': 'No data found.'}

except Exception:
# return a failure message
msg: str = 'Exception detected trying to get the test suite types.'
msg: str = 'Exception detected trying to get the environment types.'

# log the exception
logger.exception(msg)
Expand Down Expand Up @@ -519,80 +519,66 @@ async def superv_workflow_request(workflow_type: WorkflowTypeName, run_status: R
# convert the string to a dict
test_request = json.loads(tests)

# create base request db object
base_request_data: dict = {'workflow-type': workflow_type, 'db-image': db_image, 'db-type': db_type, "os-image": os_image,
'package-dir': package_dir, 'tests': None}

# if there are tests declared
if len(test_request) > 0:
# init a couple storage lists
provider_tests: list = []
consumer_tests: list = []

# define the max number of tests in a short running group
short_batch_size: int = int(os.getenv('SHORT_BATCH_SIZE', '10'))

# define the max number of tests in a long running group
long_batch_size: int = int(os.getenv('LONG_BATCH_SIZE', '2'))

# get a list of all the tests
test_list: list = db_info.get_test_names()

# get a list of the tests that are long-running
long_running_tests: list = [el['label'] for el in test_list if el['description'] == 'L']

# there could be a set of tests for a provider and/or a consumer for each test request
for test_type in test_request:
# if provider tests were requested
if 'PROVIDER' in test_type:
# get a list of the long-running tests requested
long_runners: list = [test for test in test_type['PROVIDER'] if test in long_running_tests]

# the long runners go off in batches of LONG_BATCH_SIZE
for batch in batched(long_runners, long_batch_size):
# append the test group
provider_tests.append({'PROVIDER': batch})

# get the list of shorter running tests
short_runners: list = [test for test in test_type['PROVIDER'] if test not in long_running_tests]

# the rest go off in batches of SHORT_BATCH_SIZE for short-running tests
for batch in batched(short_runners, short_batch_size):
# append the test group
provider_tests.append({'PROVIDER': batch})

# if consumer tests were requested
elif 'CONSUMER' in test_type:
# just save the consumer tests requested for now, no batching
consumer_tests = test_type
else:
logger.warning('Unrecognized test type for request group: %s.', request_group)

# if there were no provider tests found
if len(provider_tests) > 0:
# insert each test group into the DB
for item in provider_tests:
# start a single test group
final_request: list = [item]
# create base request db object
base_request_data: dict = {'workflow-type': workflow_type, 'db-image': db_image, 'db-type': db_type, "os-image": os_image,
'package-dir': package_dir, 'tests': None}

# get the run location
run_location = next(iter(test_request))

# was there a valid run location?
if run_location in ['CONSUMER', 'PROVIDER']:

# init a storage list for the tests
tests: list = []

# define the max number of tests in a short-running group
short_batch_size: int = int(os.getenv('SHORT_BATCH_SIZE', '10'))

# define the max number of tests in a long-running group
long_batch_size: int = int(os.getenv('LONG_BATCH_SIZE', '2'))

# get a list of all the tests
test_list: list = db_info.get_test_names()

# if there were consumer tests, add it in. every provider test group gets all the consumer tests
if len(consumer_tests) > 0:
final_request.append(consumer_tests)
# get a list of the tests that are long-running
long_running_tests: list = [el['label'] for el in test_list if el['description'] == 'L']

# get a list of the long-running tests requested
long_runners: list = [test for test in test_request[run_location] if test in long_running_tests]

# the long runners go off in batches of LONG_BATCH_SIZE
for batch in batched(long_runners, long_batch_size):
# append the test group
tests.append({run_location: batch})

# get the list of shorter running tests
short_runners: list = [test for test in test_request[run_location] if test not in long_running_tests]

# the rest go off in batches of SHORT_BATCH_SIZE for short-running tests
for batch in batched(short_runners, short_batch_size):
# append the test group
tests.append({run_location: batch})
else:
logger.warning('Unrecognized test type for request group: %s.', request_group)

# if there were tests found
if len(tests) > 0:
# insert each test group into the DB
for item in tests:
# build up the json for the DB
base_request_data['tests'] = final_request
base_request_data['tests'] = item

# insert the record
db_ret_val = db_info.insert_superv_request(run_status.value, base_request_data, request_group)

# else there were no valid tests requested
else:
# build up the json and insert the record
db_ret_val = db_info.insert_superv_request(run_status.value, base_request_data, request_group)

# else let it pass through
ret_val = {'Error': 'No valid tests found.'}
# else there were no tests requested
else:
# build up the json and insert the record
db_ret_val = db_info.insert_superv_request(run_status.value, base_request_data, request_group)
ret_val = {'Error': 'No tests requested.'}

# check the result
if db_ret_val != 0:
Expand Down

0 comments on commit 724c1c4

Please sign in to comment.