Add documentation

This commit is contained in:
adamoutler 2023-02-12 20:22:45 +00:00
parent 3182959210
commit 87153f4194
10 changed files with 133 additions and 61 deletions

View File

@@ -13,7 +13,10 @@
"extensions": [
"pomdtr.secrets",
"ms-python.python",
"donjayamanne.python-extension-pack"
"donjayamanne.python-extension-pack",
"ivory-lab.jenkinsfile-support",
"njpwerner.autodocstring",
"KevinRose.vsc-python-indent"
]
}
}

View File

@@ -1,3 +1,7 @@
""" aidgaf-server. """
""" The name of the package"""
name = 'aidgaf'
""" The version of the package"""
version='0.1.0'

View File

@@ -1,3 +1,5 @@
""" aidgaf-server entry point. """
import server
""" The main entry point for the aidgaf-server package."""
server.main()

View File

@@ -1 +1,4 @@
"""Constants for the aidgaf-server package."""
""" The encoding to use for strings. """
UTF8="utf-8"

View File

@@ -1,6 +1,18 @@
import hashlib
def calculate_hash(value:bytes, secret:bytes)->str:
""" This function calculates the hash of a value and a secret.
It is used to verify that the message is from the server. The hash
is calculated using the SHA512 algorithm. The hash is returned as a
hex string. This is not a secure hash, but it is good enough for
this application.
parameters:
value: The value to hash.
secret: The secret to hash with the value.
returns:
The hash of the value and secret. This is a hex string.
"""
m = hashlib.sha512()
m.update(b"".join([value,secret]))
return m.hexdigest()
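A minimal usage sketch (illustrative, not part of this commit); the message and secret values below are hypothetical placeholders:

import hash_calculator

# Both arguments must be bytes; the digest is a 128-character SHA512 hex string.
message = b'{"command":"aidgaf","data":{"username":"AdamOutler"},"timestamp":1675725191}'
secret = b'my-shared-secret'  # hypothetical value of the shared HASHKEY
print(hash_calculator.calculate_hash(message, secret))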

View File

@@ -1,3 +1,6 @@
""" This file contains the logic for the IDGAF server. It can be used to make a stand-alone
IDGAF server, or it can be used as a module in a larger application.
"""
import json
import random
import security
@@ -5,26 +8,29 @@ import re
import requests
from time import time
from datetime import datetime
import openai
import settings
openai.organization = "org-hNNV1yHjZp7T3pn5pdZWaKLm"
# print(openai.Model.list())
""" The URL for the OpenAI API. """
URL = "https://api.openai.com/v1/completions"
""" The data to send to OpenAI. """
DATA = {"model": settings.OPEN_AI_COMPLETION_MODEL,
"prompt": settings.PROMPTS[0],
"temperature": settings.TEMPERATURE,
"max_tokens": settings.OPEN_AI_MAX_TOKENS
}
""" The headers to send to OpenAI. """
request_headers = {"Authorization": "Bearer " +
settings.APIKEY, "Content-Type": "application/json"}
def get_response_base_object(text):
def get_response_base_object(text:str) -> dict:
""" This is used to create the response object for the server.
Parameters:
text: The text to return to the client.
Returns:
A dictionary containing the response object.
"""
resultObject = {}
resultObject["message"] = {}
resultObject["service"] = "AIDGAF Server"
@@ -33,7 +39,14 @@ def get_response_base_object(text):
resultObject["timestamp"] = datetime.utcnow().timestamp()
return resultObject
def parse_idgaf_request(command):
def parse_idgaf_request(command)->[int, dict]:
""" This function handles the IDGAF command. It will return a response object.
Parameters:
command: The command object received from the client.
Returns:
A tuple containing the status code and the response object.
"""
the_data = get_prompt(command)
response = get_gpt_response(the_data)
try:
@@ -43,12 +56,23 @@ def parse_idgaf_request(command):
obj = get_response_base_object(response_text)
return [response.status_code, obj]
def get_gpt_response(data):
def get_gpt_response(data) -> requests.Response:
""" This function communicates with OpenAI and returns a response object.
Parameters:
data: The data to send to OpenAI.
Returns:
The response object from OpenAI.
"""
gpt_response = requests.post(URL, json=data, headers=request_headers)
return gpt_response
def get_prompt(command):
def get_prompt(command)->dict:
""" Selects a prompt from the PROMPTS list and replaces the USERNAME placeholder with the username.
Parameters:
command: The command object received from the client.
Returns:
A dictionary containing the data to send to OpenAI.
"""
my_prompt = random.choice(settings.PROMPTS)
my_prompt = my_prompt.replace(
"USERNAME", command['message']['data']['username'])
@@ -59,35 +83,9 @@ def get_prompt(command):
return the_data
def get_prompt(command):
my_prompt = random.choice(settings.PROMPTS)
my_prompt = my_prompt.replace(
"USERNAME", command['message']['data']['username'])
print("Prompt selected: "+my_prompt)
the_data = DATA
the_data["prompt"] = my_prompt
return the_data
value = '{"service":"papa","message":{"command":"aidgaf","data":{"username":"AdamOutler"},"timestamp":1675725191},"hash":"1bc73914478835d03f9ebdfb46328321d2bb656647e2876d6f162cc1860607fcfca8d825c48e390a6a254ee0835c8a4fe5f9a25795a3a0880ae5a23e9c132cf2"}'
if __name__ == "__main__":
""" This function is for testing the IDGAF capabilities without a server. """
value = '{"service":"papa","message":{"command":"aidgaf","data":{"username":"AdamOutler"},"timestamp":1675725191},"hash":"1bc73914478835d03f9ebdfb46328321d2bb656647e2876d6f162cc1860607fcfca8d825c48e390a6a254ee0835c8a4fe5f9a25795a3a0880ae5a23e9c132cf2"}'
command = json.loads(value)
[code, result] = parse_idgaf_request(command)
print(result)
# curl https://api.openai.com/v1/completions \
# -H "Content-Type: application/json" \
# -H "Authorization: Bearer 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" \
# -d '{"model": "text-curie-001",
# "prompt": "Say \"Adam does not give a fuck\" in a thoughtful and clever prose consisting of one to five paragraphs.",
# "temperature":1, "max_tokens": 500}'
# |jq -r .choices[0].text
# curl -X PATCH 127.0.0.1:8087 -d '{"message":{"command":"aidgaf","data":{"username":"AdamOutler"}}}'
# 2,500,000 tokens = $5
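For illustration, a sketch of the payload get_prompt assembles for the test user above, assuming the values from settings.py in this commit (text-davinci-003, 500 max tokens, temperature 0.7); the prompt shown is the first entry in settings.PROMPTS, though get_prompt actually picks one at random:

# Hypothetical result of get_prompt() for username "AdamOutler".
example_payload = {
    "model": "text-davinci-003",
    "prompt": 'Say "AdamOutler does not give a fuck" as a haiku and mention that it is a haiku.',
    "temperature": 0.7,
    "max_tokens": 500,
}
# get_gpt_response(example_payload) POSTs this JSON to URL and returns the
# requests.Response whose body carries choices[0].text, as in the curl example above.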

View File

@@ -1,12 +1,18 @@
""" Security functions for the server. """
import re
import time
import hash_calculator
import settings
from const import UTF8
def perform_hash_checks(str_request)->bool:
""" Performs a hash check on the message, and verifies the timestamp is valid.
If either check fails, return False. Otherwise, return True.
def perform_hash_checks(str_request):
Parameters:
str_request: The request body, as a string.
Returns:
True if the message is valid, False otherwise."""
hash = get_message_hash(str_request)
if hash not in str_request:
@@ -17,9 +23,14 @@ def perform_hash_checks(str_request):
return False
return True
def get_message_hash(json_command) -> str:
"""Get the object named "message", and run a hash over it. """
""" Get the object named "message", and run a hash over it. The hash is calculated
using the SHA512 algorithm. The hash is returned as a hex string.
Parameters:
json_command: The JSON command to hash.
Returns:
The hash of the message, as a string. """
strip1 = re.sub(".*\"message\":", "", json_command, 1)
if ("\"hash\":" in strip1):
strip2 = re.sub(',\"hash\":.*', '', strip1)
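A sketch of how a client might sign a request body so the checks above pass. The tail of get_message_hash falls outside this hunk, so the assumption here is that the stripped "message" object is hashed with hash_calculator.calculate_hash and the shared HASHKEY:

import hash_calculator

secret = b'my-shared-secret'  # hypothetical shared HASHKEY
message = '{"command":"aidgaf","data":{"username":"AdamOutler"},"timestamp":1675725191}'
signature = hash_calculator.calculate_hash(message.encode('utf-8'), secret)
body = '{"service":"papa","message":' + message + ',"hash":"' + signature + '"}'
# The server strips everything up to "message": and the trailing ,"hash":...,
# recomputes the digest, and rejects the request if it does not appear in the body.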

View File

@@ -1,4 +1,5 @@
""" aidgaf-server - A simple server to handle requests from the aidgaf client.
This server will communicate with OpenAI to generate responses to the client."""
import json
import random
@@ -12,14 +13,17 @@ default_request_body = b'{"message":{"command":"aidgaf","data":{"username":"Adam
class IDGAFServer(BaseHTTPRequestHandler):
""" This class handles the requests from the client. """
def do_GET(self):
""" This function handles GET requests. """
self.send_response(418)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes("", UTF8))
def do_PATCH(self):
""" This function handles PATCH requests. """
body = self.get_body().decode(UTF8)
if not perform_sanity_checks(body):
self.send_response(403)
@@ -28,6 +32,9 @@ class IDGAFServer(BaseHTTPRequestHandler):
self.do_request_handling(command)
def do_request_handling(self, command):
""" This function handles the request.
Parameters:
command: The command object received from the client. """
print(command)
if command['message']['command'] == 'aidgaf':
[responseCode, json_response] = idgaf.parse_idgaf_request(command)
@@ -35,7 +42,11 @@ class IDGAFServer(BaseHTTPRequestHandler):
response_body = json.dumps(json_response)
self.handle_response(responseCode, response_body)
def get_body(self):
def get_body(self)->bytes:
""" This function returns the body of the request.
Returns:
The body of the request.
"""
header_length = self.headers.get('Content-Length')
request_body = default_request_body
if header_length != None and self.headers.get('Content-Length') != None:
@@ -47,13 +58,23 @@ class IDGAFServer(BaseHTTPRequestHandler):
def handle_response(self, code, body):
""" This function handles the response to the client.
Parameters:
code: The HTTP response code.
body: The body of the response. """
self.send_response(code)
self.send_header("Content-type", "text/html")
self.end_headers()
print("sending:"+body)
self.wfile.write(bytes(body, UTF8))
def perform_sanity_checks(str_request):
def perform_sanity_checks(str_request)->bool:
""" Performs a hash check on the message, and verifies the timestamp is valid.
If either check fails, return False. Otherwise, return True.
Parameters:
str_request: The request body as a string.
Returns:
True if the message is valid, False otherwise. """
if settings.HASHKEY is not None:
hash = security.get_message_hash(str_request)
if hash not in str_request:
@@ -68,6 +89,7 @@ def perform_sanity_checks(str_request):
def main():
""" This function starts the server. """
webServer = HTTPServer(
(settings.HOSTNAME, settings.SERVERPORT), IDGAFServer)
print("Server started http://%s:%s" %
@@ -83,4 +105,5 @@ def main():
if __name__ == "__main__":
""" This function starts the main method. """
main()
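A Python counterpart to the curl PATCH example in idgaf.py (a sketch; it assumes the server is listening locally on the default port 8087 and omits the hash field, which is only checked when a HASHKEY is configured):

import requests

payload = {"message": {"command": "aidgaf", "data": {"username": "AdamOutler"}}}
# do_PATCH reads the body, runs perform_sanity_checks, and hands the command to idgaf.
response = requests.patch("http://127.0.0.1:8087", json=payload)
print(response.status_code, response.text)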

View File

@@ -1,25 +1,40 @@
"""this is the settings file for the server. It contains the settings for the server.
"""
import os
from const import UTF8
# The hostname used by this app
""" The hostname of the server. """
HOSTNAME: str = os.getenv('HOSTNAME') # localhost or some name
# The port to broadcast the server
""" The port to broadcast the server """
# 8087 or the port you want to run on. pass in with docker -e command.
SERVERPORT: int = 8087
# The API key for OpenAI
""" The API key for OpenAI"""
APIKEY: str = os.getenv('APIKEY') # secret key from OpenAPI website
if APIKEY is None:
raise Exception("APIKEY environment variable must be set")
# The hash key
HASHKEY = bytes(os.getenv('HASHKEY'),UTF8) # shared secret for hmac of message
# The prompts used for OpenAI.
""" The prompts used for OpenAI. When the server receives a request, it will
randomly select one of these prompts to use."""
PROMPTS = [
"Say \"USERNAME does not give a fuck\" as a haiku and mention that it is a haiku.",
"Say \"USERNAME does not give a fuck\" in a Dr Suess poem.",
"Tell me a story about how \"USERNAME does not give a fuck\" using an outrageous situation where someone should care but they do not and thats fine.",
"Say \"USERNAME is completely apethetic and does not give a fuck\" in a verbose manner, using your most colorful words and one metaphor."
]
""" The maximum number of tokens to use in a single OpenAI request. """
OPEN_AI_MAX_TOKENS = 500
""" The model to use for OpenAI. """
OPEN_AI_COMPLETION_MODEL = "text-davinci-003"
""" The temperature to use for OpenAI. 0-2, 0 is basicall repeating the prompt, 2 is more random. """
TEMPERATURE = 0.7
""" The hash key for the server. Leave this blank if you don't want to use it. """
HASHKEY = bytes(os.getenv('HASHKEY'),
UTF8) # shared secret for hmac of message
""" The maximum age of a message in seconds. Only used if HASHKEY is set."""
MAX_MESSAGE_AGE = 600
TEMPERATURE = 0.8
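A sketch of providing the environment variables the settings above read, for a local run (the values are placeholders; with Docker they would be passed via -e, as the port comment notes):

import os

os.environ["HOSTNAME"] = "localhost"           # name the server reports itself under
os.environ["APIKEY"] = "sk-..."                # OpenAI API key; settings raises if it is missing
os.environ["HASHKEY"] = "my-shared-secret"     # shared secret used to verify message hashes

import settings  # the values above are read when settings is first imported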

View File

@@ -1,10 +1,11 @@
""" aidgaf-server setup.py """
from setuptools import setup, find_namespace_packages
setup(
name='aidgaf',
version='1',
description='',
long_description='',
version='0.1.0',
description='OpenAI GPT Implementation of IDGAF.',
long_description='Tells the user how much they don\'t GAF, using the power of OpenAI\'s GPT-3 API.',
author='Adam Outler',
author_email='adamoutler@gmail.com',
license='IDGAF License',