From 5e0442a366cb401eac2fb4bfa215f3376f83dd0b Mon Sep 17 00:00:00 2001
From: adamoutler
Date: Wed, 8 Feb 2023 18:31:48 +0000
Subject: [PATCH] GAF to handle various server failures

---
 Dockerfile                           |  2 +-
 src/aidgaf/aidgaf-server/idgaf.py    | 12 ++++++---
 src/aidgaf/aidgaf-server/settings.py | 38 +++++++++++++++-------------
 3 files changed, 31 insertions(+), 21 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index e46d209..370914d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,6 +1,6 @@
 # docker build -t aidgaf .
 #for test
-# docker run --rm -eSERVERPORT=8087 -eAPIKEY=sk-AaKV.........................HZ4v2ipzFm6 -p8087:8087 -it aidgaf
+# docker run --rm -eAPIKEY=sk-AaKV.........................HZ4v2ipzFm6 -p8087:8087 -it aidgaf
 # curl -X PATCH http://127.0.0.1:8087 -d "{\"message\":{\"command\":\"aidgaf\",\"data\":{\"username\":\"AdamOutler\"}}}"
 
 #for deployment
diff --git a/src/aidgaf/aidgaf-server/idgaf.py b/src/aidgaf/aidgaf-server/idgaf.py
index 2aca0d6..894bd30 100644
--- a/src/aidgaf/aidgaf-server/idgaf.py
+++ b/src/aidgaf/aidgaf-server/idgaf.py
@@ -69,7 +69,11 @@ def parse_idgaf_request(command):
     the_data = get_prompt(command)
     gpt_response = requests.post(URL, json=the_data, headers=request_headers)
     print(gpt_response)
-    response_text = gpt_response.json()['choices'][0]['text'].strip()
+    try:
+        response_text = gpt_response.json()['choices'][0]['text'].strip()
+    except (KeyError, IndexError, ValueError):
+        response_text = gpt_response.text
+
     obj = get_response_base_object(response_text)
     obj['hash']=get_message_hash(json.dumps(obj))
     json_result = json.dumps(obj)
@@ -94,8 +98,10 @@ def get_message_hash(json_command) -> bytes:
     strip1 = re.sub('.*\"message\":', "", json_command, 1)
     strip2 = re.sub(',\"hash\":.*', '', strip1)
     json_value = bytes(strip2, "utf-8")
-    hash_value = hash_calculator.calculate_hash(json_value, settings.HASHKEY)
-    return hash_value
+    if settings.HASHKEY is not None:
+        hash_value = hash_calculator.calculate_hash(json_value, settings.HASHKEY)
+        return hash_value
+    return ""
 
 
 def verify_message_time(json_command) -> bool:
diff --git a/src/aidgaf/aidgaf-server/settings.py b/src/aidgaf/aidgaf-server/settings.py
index 4a13777..8a4802b 100644
--- a/src/aidgaf/aidgaf-server/settings.py
+++ b/src/aidgaf/aidgaf-server/settings.py
@@ -1,18 +1,22 @@
 import os
-#The hostname used by this app
-HOSTNAME:str = os.getenv('HOSTNAME') #localhost or some name
-#The port to broadcast the server
-SERVERPORT:int = int(os.getenv('SERVERPORT')) #8087 or the port you want to run on. pass in with docker -e command.
-#The API key for OpenAI
-APIKEY:str = os.getenv('APIKEY') #secret key from OpenAPI website
-if APIKEY is None:
-    raise Exception("APIKEY Environmental Variable must be set")
-#The hash key
-HASHKEY:str = bytes(os.getenv('HASHKEY'),'utf-8') #shared secret for hmac of message
-#The prompts used for OpenAI.
-PROMPTS=["Say \"USERNAME does not give a fuck\" in a thoughtful and clever paragraph of 5 sentences.",
-    "Say \"USERNAME does not give a fuck\" in a Dr Suess poem.",
-    "Tell me all about how much \"USERNAME does not give a fuck\" using your most colorful words."]
-OPEN_AI_MAX_TOKENS=500
-OPEN_AI_COMPLETION_MODEL="text-davinci-003"
-MAX_MESSAGE_AGE=600
\ No newline at end of file
+# The hostname used by this app
+HOSTNAME: str = os.getenv('HOSTNAME')  # localhost or some name
+# The port to broadcast the server
+# Fixed at 8087 inside the container; publish it with docker -p8087:8087.
+SERVERPORT: int = 8087
+# The API key for OpenAI
+APIKEY: str = os.getenv('APIKEY')  # secret key from the OpenAI website
+if APIKEY is None:
+    raise Exception("APIKEY Environment Variable must be set")
+# The hash key
+HASHKEY: bytes = None
+hashKey = os.getenv('HASHKEY')  # shared secret for hmac of message
+if hashKey is not None and hashKey.replace(" ", "") != "":
+    HASHKEY = bytes(hashKey, "utf-8")
+# The prompts used for OpenAI.
+PROMPTS = ["Say \"USERNAME does not give a fuck\" in a thoughtful and clever paragraph of 5 sentences.",
+           "Say \"USERNAME does not give a fuck\" in a Dr Suess poem.",
+           "Tell me all about how much \"USERNAME does not give a fuck\" using your most colorful words."]
+OPEN_AI_MAX_TOKENS = 500
+OPEN_AI_COMPLETION_MODEL = "text-davinci-003"
+MAX_MESSAGE_AGE = 600
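
A quick way to exercise the new fallback paths outside the running server is sketched below. It mirrors the two behaviors this patch adds: falling back to the raw response body when the OpenAI reply carries no usable choices, and skipping the message hash when no HASHKEY is configured. The FakeResponse stub and the hmac/sha256 helper are illustrative assumptions standing in for requests' response object and the project's hash_calculator; they are not code from this patch.

import hashlib
import hmac
import json


class FakeResponse:
    """Minimal stand-in for requests.Response: only .json() and .text."""

    def __init__(self, body: str):
        self.text = body

    def json(self):
        return json.loads(self.text)


def extract_text(response) -> str:
    # Same fallback idea as parse_idgaf_request: prefer the completion text,
    # otherwise hand back the raw body so the caller can see the failure.
    try:
        return response.json()['choices'][0]['text'].strip()
    except (KeyError, IndexError, ValueError):
        return response.text


def message_hash(payload: bytes, hashkey) -> str:
    # Same guard as get_message_hash: no shared secret means no hash.
    if hashkey is None:
        return ""
    return hmac.new(hashkey, payload, hashlib.sha256).hexdigest()


ok = FakeResponse('{"choices": [{"text": " fine "}]}')
err = FakeResponse('{"error": {"message": "The server is overloaded"}}')
print(extract_text(ok))   # "fine"
print(extract_text(err))  # the raw error body, unchanged
print(message_hash(b'{"status":"ok"}', None))              # ""
print(message_hash(b'{"status":"ok"}', b"shared-secret"))  # hex digest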