Compare commits
48 Commits
f5fc985be9
...
main
Author | SHA1 | Date | |
---|---|---|---|
1e2c77227b | |||
0923f41428 | |||
78ce7ab02a | |||
33f89916de | |||
d3872e4a62 | |||
fc00639e72 | |||
b5805c658f | |||
3444bc50ad | |||
1a91cbc8cc | |||
a649fa87a4 | |||
ce68767640 | |||
8c99b40b2a | |||
f1c7b195f3 | |||
b326642664 | |||
127892b5f2 | |||
a4c63a7177 | |||
a6e8e1a54f | |||
f142ad562e | |||
086f3b6ede | |||
18fb9c4679 | |||
7ea946de88 | |||
94f3c26b86 | |||
66b99c5d1b | |||
751affef70 | |||
2503b8ecaa | |||
d497707a05 | |||
e3e20a7b07 | |||
2752702d17 | |||
a6469184fb | |||
4b8e4f9846 | |||
d6ffa45ee6 | |||
ecb5bb42f0 | |||
b513b38398 | |||
87153f4194 | |||
3182959210 | |||
29c6ee9a90 | |||
6d5e90d278 | |||
5f2abddd89 | |||
5e0442a366 | |||
f7564146be | |||
c6848577cb | |||
ebca2990cd | |||
d3aa46e0fc | |||
72e545a0da | |||
baa241ca14 | |||
47cc5fae55 | |||
df753d77ee | |||
ade9944159 |
@ -7,11 +7,15 @@
|
||||
"features": {
|
||||
"ghcr.io/devcontainers/features/python:1": {}
|
||||
},
|
||||
"onCreateCommand": "pip install openai; apt update; apt install git",
|
||||
"onCreateCommand": "pip install openai",
|
||||
"customizations": {
|
||||
"vscode": {
|
||||
"extensions": [
|
||||
"pomdtr.secrets"
|
||||
"pomdtr.secrets",
|
||||
"ms-python.python",
|
||||
"njpwerner.autodocstring",
|
||||
"NicolasVuillamy.vscode-groovy-lint",
|
||||
"ms-python.pylint"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
3
.vscode/extensions.json
vendored
3
.vscode/extensions.json
vendored
@ -1,5 +1,6 @@
|
||||
{
|
||||
"recommendations": [
|
||||
"pomdtr.secrets"
|
||||
"pomdtr.secrets",
|
||||
"nicolasvuillamy.vscode-groovy-lint"
|
||||
]
|
||||
}
|
33
.vscode/launch.json
vendored
Normal file
33
.vscode/launch.json
vendored
Normal file
@ -0,0 +1,33 @@
|
||||
{
|
||||
// Use IntelliSense to learn about possible attributes.
|
||||
// Hover to view descriptions of existing attributes.
|
||||
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "Start Server",
|
||||
"type": "python",
|
||||
"request": "launch",
|
||||
"program": "src/aidgaf/aidgaf_server/server.py",
|
||||
"console": "integratedTerminal",
|
||||
"justMyCode": true
|
||||
},
|
||||
{
|
||||
"name": "IDGAF",
|
||||
"type": "python",
|
||||
"request": "launch",
|
||||
"program": "src/aidgaf/aidgaf_server/idgaf.py",
|
||||
"console": "integratedTerminal",
|
||||
"justMyCode": true
|
||||
},
|
||||
|
||||
{
|
||||
"name": "Python: Current File",
|
||||
"type": "python",
|
||||
"request": "launch",
|
||||
"program": "${file}",
|
||||
"console": "integratedTerminal",
|
||||
"justMyCode": true
|
||||
}
|
||||
]
|
||||
}
|
10
.vscode/settings.json
vendored
10
.vscode/settings.json
vendored
@ -1,5 +1,11 @@
|
||||
{
|
||||
"secrets.enabledFolders": [
|
||||
"aidgaf"
|
||||
]
|
||||
"aidgaf",
|
||||
"default"
|
||||
],
|
||||
"files.associations": {
|
||||
"[Jj]enkinsfile*": "groovy"
|
||||
},
|
||||
"python.linting.pylintPath": "/usr/local/python/current/bin/python"
|
||||
|
||||
}
|
22
Dockerfile
Normal file
22
Dockerfile
Normal file
@ -0,0 +1,22 @@
|
||||
# docker build -t aidgaf .
|
||||
#for test
|
||||
# docker run --rm -eAPIKEY=sk-AaKV.........................HZ4v2ipzFm6 -p8087:8087 -it aidgaf
|
||||
# curl -X PATCH http://127.0.0.1:8087 -d "{\"message\":{\"command\":\"aidgaf\",\"data\":{\"username\":\"AdamOutler\"}}}"
|
||||
|
||||
#for deployment
|
||||
# docker run --rm \
|
||||
# -eSERVERPORT=8087 \
|
||||
# -eHOSTNAME=localhost \
|
||||
# -eHASHKEY=Password123 \
|
||||
# -eAPIKEY=sk-AaKV.........................HZ4v2ipzFm6 \
|
||||
# -p8087:8087 \
|
||||
# --restart=always \
|
||||
# aidgaf
|
||||
|
||||
FROM alpine:latest
|
||||
RUN apk add python3 py3-pip \
|
||||
&& pip3 install openai requests\
|
||||
&& mkdir /app
|
||||
COPY src/aidgaf /app/aidgaf
|
||||
EXPOSE 8087
|
||||
ENTRYPOINT ["/usr/bin/python","/app/aidgaf/aidgaf_server/__main__.py"," 2>&1 >/dev/stdout"]
|
56
Jenkinsfile
vendored
Normal file
56
Jenkinsfile
vendored
Normal file
@ -0,0 +1,56 @@
|
||||
#!/bin/groovy
|
||||
/* groovylint-disable CompileStatic, DuplicateStringLiteral, LineLength */
|
||||
pipeline {
|
||||
agent {
|
||||
docker {
|
||||
alwaysPull true
|
||||
image 'docker.io/alpine:3.14'
|
||||
label 'Wrangler1'
|
||||
args '-u root'
|
||||
}
|
||||
}
|
||||
|
||||
stages {
|
||||
stage('Prepare Environment') {
|
||||
steps {
|
||||
sh '#!/bin/sh \n' +
|
||||
'id; apk add docker openrc git'
|
||||
}
|
||||
}
|
||||
stage('Obtain Source') {
|
||||
steps {
|
||||
git branch: 'main', url: 'https://git.adamoutler.com/aoutler/aidgaf-server.git'
|
||||
}
|
||||
}
|
||||
stage('Build in docker') {
|
||||
steps {
|
||||
// Get some code from a Git repository
|
||||
sh '#!/bin/sh \n' +
|
||||
'docker build -t aidgaf .'
|
||||
}
|
||||
}
|
||||
stage('setup credentials') {
|
||||
steps {
|
||||
withCredentials([ sshUserPrivateKey(credentialsId: 'dockeruserOn192.168.1.115', keyFileVariable: 'sshkey', usernameVariable: 'user')]) {
|
||||
sh '#!/bin/sh \n' +
|
||||
'set +e; docker logs aidgaf-server; docker stop aidgaf-server||echo machine stopped; docker rm aidgaf-server||echo machine does not exist; set -e'
|
||||
}
|
||||
}
|
||||
}
|
||||
stage('export docker container') {
|
||||
steps {
|
||||
withCredentials([
|
||||
string(credentialsId: 'ai-hacked-your-info-key', variable: 'OPENWEBUIAPIKEY'),
|
||||
string(credentialsId: 'PapaHashingSecret', variable: 'PAPA_HASH'),
|
||||
string(credentialsId: 'PapaAsyncUrl', variable: 'ASYNC_URL'),
|
||||
sshUserPrivateKey(credentialsId: 'dockeruserOn192.168.1.115', keyFileVariable: 'sshkey')]) {
|
||||
sh '#!/bin/sh \n' +
|
||||
'mkdir -p ~/.ssh; cp "$sshkey" ~/.ssh/id_rsa'
|
||||
sh '#!/bin/sh \n' +
|
||||
/* groovylint-disable-next-line GStringExpressionWithinString */
|
||||
'docker run --name=aidgaf-server -eSERVERPORT=8087 -eHOSTNAME=0.0.0.0 -eHASHKEY="${PAPA_HASH}" -eAPIKEY="${OPENWEBUIAPIKEY}" -eASYNC_METHOD="PATCH" -eASYNC_URL="${ASYNC_URL}" -p8087:8087 -d --restart=always aidgaf'
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
52
README.md
Normal file → Executable file
52
README.md
Normal file → Executable file
@ -0,0 +1,52 @@
|
||||
# How Much Do You Not Give a F***?
|
||||
A fun application that connects to OpenAI APIs to determine just how much you don't give a f***!
|
||||
|
||||
## Getting Started
|
||||
These instructions will get you a copy of the project up and running on your local machine for development and testing purposes.
|
||||
|
||||
## Prerequisites
|
||||
* Docker
|
||||
* An OpenAI API key, obtained from the OpenAI website.
|
||||
|
||||
## Installing
|
||||
1. Clone the repository to your local machine:
|
||||
``` bash
|
||||
git clone https://git.adamoutler.com/aoutler/aidgaf-server
|
||||
```
|
||||
2. Open the project in Visual Studio Code.
|
||||
3. Follow the prompts to open the Devcontainer and begin developing.
|
||||
|
||||
## Development
|
||||
1. Set the APIKEY environment variable by using the Secrets extension in Visual Studio Code.
|
||||
2. Press F5 in Visual Studio Code to start the server.
|
||||
## Usage
|
||||
The server accepts a message in the following format:
|
||||
|
||||
``` json
|
||||
{"message":{"command":"aidgaf","data":{"username":"AdamOutler"},"timestamp":1676231329}}
|
||||
```
|
||||
## Built With
|
||||
* [Docker](https://www.docker.com/)
|
||||
* [Visual Studio Code](https://code.visualstudio.com/)
|
||||
* [OpenAI](https://openai.com/)
|
||||
* [Python](https://www.python.org/)
|
||||
* [Automated builds by Jenkins](https://jenkins.adamoutler.com/blue/organizations/jenkins/Update%20IDGAF%20Server/activity)
|
||||
## Contributing
|
||||
If you would like to contribute to this project, please fork the repository and submit a pull request.
|
||||
|
||||
## License
|
||||
Copyright 2023 Adam Outler
|
||||
|
||||
Licensed under the I Dont Give A F License, Version 1.1 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
rule 0. Not for use in California.
|
||||
|
||||
rule 3. Send an email to idgaf@hackedyour.info if you find this helpful.
|
||||
|
||||
Note: If you're wondering where numbers 1 and 2 are, IDGAF.
|
||||
|
||||
## Acknowledgments
|
||||
* This README was generated using OpenAI's language model, ChatGPT.
|
||||
* The Python code in other areas was documented using Github Copilot.
|
||||
* AI is used for documentation because the author didn't give a f*** enough to write it himself.
|
||||
|
@ -1,3 +0,0 @@
|
||||
|
||||
|
||||
name = 'aidgaf'
|
@ -1,90 +0,0 @@
|
||||
import json
|
||||
import requests
|
||||
from http.server import BaseHTTPRequestHandler, HTTPServer
|
||||
import time
|
||||
from datetime import datetime
|
||||
import openai
|
||||
import random
|
||||
import settings
|
||||
|
||||
openai.organization = "org-hNNV1yHjZp7T3pn5pdZWaKLm"
|
||||
#print(openai.Model.list())
|
||||
|
||||
|
||||
|
||||
URL="https://api.openai.com/v1/completions"
|
||||
PROMPTS=["Say \"USERNAME does not give a fuck\" in a thoughtful and clever paragraph of 5 sentences.",
|
||||
"Say \"USERNAME does not give a fuck\" in a Dr Suess poem.",
|
||||
"Tell me all about how much \"USERNAME does not give a fuck\" using your most colorful words."]
|
||||
DATA = {"model": "text-davinci-003",
|
||||
"prompt": PROMPTS[0],
|
||||
"temperature":1,
|
||||
"max_tokens": 200
|
||||
}
|
||||
|
||||
request_headers={"Authorization": "Bearer "+settings.APIKEY,"Content-Type": "application/json"}
|
||||
|
||||
|
||||
class IDGAFServer(BaseHTTPRequestHandler):
|
||||
|
||||
def do_PATCH(self):
|
||||
# print("Request: "+self.request+ " "+webServer.get_request())
|
||||
request_body=self.rfile.read(int(self.headers.get('Content-Length')))
|
||||
command=json.loads(request_body)
|
||||
print(command)
|
||||
if command['message']['command'] == 'aidgaf':
|
||||
[responseCode,response_body]=parse_idgaf_request(command)
|
||||
self.handle_response(responseCode,response_body)
|
||||
print("sending:"+response_body)
|
||||
|
||||
|
||||
|
||||
def handle_response(self,code,body):
|
||||
self.send_response(code)
|
||||
self.send_header("Content-type", "text/html")
|
||||
self.end_headers()
|
||||
self.wfile.write(bytes(body,"UTF-8"))
|
||||
|
||||
|
||||
def get_response_base_object(text):
|
||||
resultObject={}
|
||||
resultObject["message"]={}
|
||||
resultObject["message"]["data"]={}
|
||||
resultObject["message"]["data"]["resultObject"]=text
|
||||
resultObject["timestamp"]=datetime.utcnow().timestamp()
|
||||
return resultObject
|
||||
|
||||
|
||||
def parse_idgaf_request(command):
|
||||
the_data=get_prompt(command)
|
||||
gpt_response=requests.post(URL, json=the_data, headers=request_headers)
|
||||
print(gpt_response)
|
||||
response_text=gpt_response.json()['choices'][0]['text'].strip()
|
||||
obj=get_response_base_object(response_text)
|
||||
json_result=json.dumps(obj)
|
||||
return [gpt_response.status_code,json_result]
|
||||
|
||||
def get_prompt(command):
|
||||
my_prompt=random.choice(PROMPTS)
|
||||
my_prompt=my_prompt.replace("USERNAME",command['message']['data']['username'])
|
||||
|
||||
print("Prompt selected: "+my_prompt)
|
||||
the_data=DATA
|
||||
the_data["prompt"]=my_prompt
|
||||
return the_data
|
||||
|
||||
|
||||
value='{"service": "papa", "message": {"command": "aidgaf", "data": {"username": "AdamOutler"}, "timestamp": 1675373229}, "hash": "de08e85b8afc3b7d257fa559fd1dd295838ea1387f1e0fa75ceb11d9da81e59fadfc878d028281dfb739002bae818b89a8363e49d68e923874c969716a90f8e3"}'
|
||||
if __name__ == "__main__":
|
||||
print(parse_idgaf_request(command=json.loads(value)))
|
||||
|
||||
|
||||
#curl https://api.openai.com/v1/completions \
|
||||
# -H "Content-Type: application/json" \
|
||||
# -H "Authorization: Bearer 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" \
|
||||
# -d '{"model": "text-curie-001",
|
||||
# "prompt": "Say \"Adam does not give a fuck\" in a thoughtful and clever prose consisting of one to five paragraphs.",
|
||||
# "temperature":1, "max_tokens": 500}'
|
||||
# |jq -r .choices[0].text
|
||||
# curl -X PATCH 127.0.0.1:8087 -d '{"message":{"command":"aidgaf","data":{"username":"AdamOutler"}}}'
|
||||
#2,500,000 tokens = $5
|
@ -1,17 +0,0 @@
|
||||
import idgaf
|
||||
from idgaf import IDGAFServer
|
||||
from http.server import HTTPServer
|
||||
import settings
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
webServer = HTTPServer((settings.HOSTNAME, settings.SERVERPORT), idgaf.IDGAFServer)
|
||||
print("Server started http://%s:%s" % (settings.HOSTNAME, settings.SERVERPORT))
|
||||
|
||||
try:
|
||||
webServer.serve_forever()
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
webServer.server_close()
|
||||
print("Server stopped.")
|
@ -1,7 +0,0 @@
|
||||
import os
|
||||
#The hostname used by this app
|
||||
HOSTNAME:str = os.getenv('HOSTNAME') #localhost
|
||||
#The port to broadcast the server
|
||||
SERVERPORT:int = int(os.getenv('SERVERPORT')) #8087
|
||||
#The API key for OpenAI
|
||||
APIKEY:str = os.getenv('APIKEY') #secret key
|
7
src/aidgaf/aidgaf_server/__init__.py
Normal file
7
src/aidgaf/aidgaf_server/__init__.py
Normal file
@ -0,0 +1,7 @@
|
||||
""" aidgaf-server. """
|
||||
|
||||
# The name of the package
|
||||
name = 'aidgaf_server' # pylint: disable=C0103
|
||||
|
||||
# The version of the package
|
||||
version = '0.1.0' # pylint: disable=C0103
|
5
src/aidgaf/aidgaf_server/__main__.py
Normal file
5
src/aidgaf/aidgaf_server/__main__.py
Normal file
@ -0,0 +1,5 @@
|
||||
""" aidgaf-server entry point. """
|
||||
import server
|
||||
|
||||
# The main entry point for the aidgaf-server package.
|
||||
server.main()
|
10
src/aidgaf/aidgaf_server/const.py
Normal file
10
src/aidgaf/aidgaf_server/const.py
Normal file
@ -0,0 +1,10 @@
|
||||
"""Constants for the aidgaf-server package."""
|
||||
|
||||
UTF8="utf-8"
|
||||
""" The encoding to use for strings. """
|
||||
|
||||
OPENAI_TIMEOUT=30
|
||||
""" The timeout for OpenAI requests. """
|
||||
|
||||
REPORTING_TIMEOUT=10
|
||||
""" The timeout for async reporting requests. """
|
20
src/aidgaf/aidgaf_server/hash_calculator.py
Normal file
20
src/aidgaf/aidgaf_server/hash_calculator.py
Normal file
@ -0,0 +1,20 @@
|
||||
""" This module contains the function to calculate the hash of a value and a secret."""
|
||||
import hashlib
|
||||
|
||||
|
||||
def calculate_hash(value: bytes, secret: bytes) -> str:
|
||||
""" This function calculates the hash of a value and a secret.
|
||||
It is used to verify that the message is from the server. The hash
|
||||
is calculated using the SHA512 algorithm. The hash is returned as a
|
||||
hex string. This is not a secure hash, but it is good enough for
|
||||
this application.
|
||||
|
||||
parameters:
|
||||
value: The value to hash.
|
||||
secret: The secret to hash with the value.
|
||||
returns:
|
||||
The hash of the value and secret. This is a hex string.
|
||||
"""
|
||||
sha512 = hashlib.sha512()
|
||||
sha512.update(b"".join([value, secret]))
|
||||
return sha512.hexdigest()
|
106
src/aidgaf/aidgaf_server/idgaf.py
Normal file
106
src/aidgaf/aidgaf_server/idgaf.py
Normal file
@ -0,0 +1,106 @@
|
||||
""" This file contains the logic for the IDGAF server. It can be used to make a stand-alone
|
||||
IDGAF server, or it can be used as a module in a larger application.
|
||||
"""
|
||||
from datetime import datetime
|
||||
import json
|
||||
import random
|
||||
import requests
|
||||
import settings
|
||||
from const import OPENAI_TIMEOUT
|
||||
|
||||
URL = "https://ai.hackedyour.info/api/chat/completions"
|
||||
""" The URL for the OpenAI API. """
|
||||
|
||||
DATA = {"model": settings.OPEN_AI_COMPLETION_MODEL,
|
||||
"messages": [{"role":"system", "content":"You are AIDGAF server. You tell about how much people dont give a fuck"},{"role":"user", "content": settings.PROMPTS[0] }],
|
||||
"temperature": settings.TEMPERATURE,
|
||||
"max_tokens": settings.OPEN_AI_MAX_TOKENS
|
||||
}
|
||||
""" The data to send to OpenAI. """
|
||||
|
||||
request_headers = {"Authorization": "Bearer " +
|
||||
settings.APIKEY, "Content-Type": "application/json"}
|
||||
""" The headers to send to OpenAI. """
|
||||
|
||||
|
||||
def get_response_base_object(text: str) -> dict:
    """Build the response envelope sent back to the client.

    Parameters:
        text: The generated text to return to the client.
    Returns:
        A dict with the result text under message.data.resultObject,
        the service name, and a UTC timestamp.
    """
    envelope = {
        "message": {"data": {"resultObject": text}},
        "service": "AIDGAF Server",
        "timestamp": datetime.utcnow().timestamp(),
    }
    return envelope
|
||||
|
||||
|
||||
def parse_idgaf_request(idgaf_command) -> [int, dict]:
    """Handle one 'aidgaf' command end to end.

    Builds the prompt payload, queries the API, extracts the generated
    text (falling back to the raw response body when the expected JSON
    shape is missing), and wraps it in the standard response envelope.

    Parameters:
        idgaf_command: The parsed command object received from the client.
    Returns:
        [status_code, response_dict] from the upstream call.
    """
    payload = get_prompt(idgaf_command)
    api_response = get_gpt_response(payload)
    try:
        text = api_response.json()['choices'][0]['message']["content"].strip()
    except KeyError:
        # Error responses carry no 'choices'; pass the raw body through.
        text = api_response.text
    print(text)
    return [api_response.status_code, get_response_base_object(text)]
|
||||
|
||||
|
||||
def get_gpt_response(data) -> requests.Response:
    """POST ``data`` to the chat-completions endpoint and return the raw response.

    Parameters:
        data: JSON-serializable request body for the API.
    Returns:
        The ``requests.Response`` object from the call.
    """
    return requests.post(url=URL, json=data,
                         headers=request_headers,
                         timeout=OPENAI_TIMEOUT)
|
||||
|
||||
|
||||
def get_prompt(command) -> dict:
    """Build the OpenAI request payload for one command.

    Picks a random prompt from settings.PROMPTS, substitutes the caller's
    username for the USERNAME placeholder, and prepends optional reply /
    input context supplied by the client.

    Parameters:
        command: The command object received from the client; must contain
            message.data.username and may contain replyTo/replyText/inputText.
    Returns:
        A copy of DATA with the final prompt as the last message's content.
    """
    from copy import deepcopy

    data = command['message']['data']
    reply_to = data.get('replyTo', "")
    reply_text = data.get('replyText', "")
    input_text = data.get('inputText', "")

    my_prompt = random.choice(settings.PROMPTS)
    my_prompt = my_prompt.replace("USERNAME", data['username'])
    if reply_to:
        my_prompt = reply_to + "said \"" + reply_text + ".\"\n In response, " + my_prompt
    if input_text:
        my_prompt = ("With the following in mind: " + data['username']
                     + " doesn't care about \"" + input_text + "\".\n\n" + my_prompt)

    print(my_prompt)
    # Bug fix: the original aliased the module-level DATA template and mutated
    # it in place, so concurrent requests (the server dispatches on threads)
    # could race on the shared dict.  Work on a deep copy; the returned
    # payload is otherwise identical.
    the_data = deepcopy(DATA)
    the_data["messages"][-1]["content"] = my_prompt
    return the_data
|
||||
|
||||
|
||||
# Manual smoke test: exercises the full IDGAF pipeline (prompt selection,
# API call, response packaging) without starting the HTTP server.
if __name__ == "__main__":
    INPUT = '''{"service":"papa","message":
{"command":"aidgaf","data":{"username":"AdamOutler"},
"timestamp":1675725191},
"hash":"1bc73914478835d03f9ebdfb46328321d2bb656647e2876d6f162cc1860607fcfca8d825c48e390a6a254ee0835c8a4fe5f9a25795a3a0880ae5a23e9c132cf2"}'''
    test_command = json.loads(INPUT)
    # parse_idgaf_request performs a live API call; requires APIKEY to be set.
    [code, result] = parse_idgaf_request(test_command)
    print(result)
|
31
src/aidgaf/aidgaf_server/merge.py
Normal file
31
src/aidgaf/aidgaf_server/merge.py
Normal file
@ -0,0 +1,31 @@
|
||||
"""This module contains the merge_dict function."""
|
||||
|
||||
|
||||
def merge_dict_no_overwrite(base_dictionary, other_dictionary, location=None)->dict:
|
||||
""" This function merges two dictionaries. If the same key exists in both dictionaries,
|
||||
the value from the other_dictionary will be used. This function will recurse into nested
|
||||
dictionaries. If the same key exists in both dictionaries, and the value is a dictionary,
|
||||
the function will recurse into the nested dictionary. If the same key exists in both
|
||||
dictionaries, and the value is not a dictionary, the value from the base_dictionary will
|
||||
be used.
|
||||
|
||||
Parameters:
|
||||
base_dictionary: The dictionary to merge the other_dictionary into.
|
||||
other_dictionary: The dictionary to merge keys from. if the same key exists in both
|
||||
dictionaries, the value from the base_dictionary will be used.
|
||||
location: leave blank. This is used to track the location of the key in the dictionary.
|
||||
Returns:
|
||||
The merged dictionary.
|
||||
|
||||
"""
|
||||
if location is None:
|
||||
location=[]
|
||||
for key in other_dictionary:
|
||||
if key in base_dictionary:
|
||||
if isinstance(base_dictionary[key], dict) and isinstance(other_dictionary[key], dict):
|
||||
merge_dict_no_overwrite(base_dictionary[key], other_dictionary[key],[str(key)])
|
||||
else:
|
||||
pass
|
||||
else:
|
||||
base_dictionary[key] = other_dictionary[key]
|
||||
return base_dictionary
|
56
src/aidgaf/aidgaf_server/security.py
Normal file
56
src/aidgaf/aidgaf_server/security.py
Normal file
@ -0,0 +1,56 @@
|
||||
""" Security functions for the server. """
|
||||
import re
|
||||
import time
|
||||
import hash_calculator
|
||||
import settings
|
||||
from const import UTF8
|
||||
|
||||
def perform_hash_checks(str_request)->bool:
|
||||
""" Performs a hash check on the message, and verifies the timestamp is valid.
|
||||
If either check fails, return False. Otherwise, return True.
|
||||
|
||||
Parameters:
|
||||
str_request: The request body, as a string.
|
||||
Returns:
|
||||
True if the message is valid, False otherwise."""
|
||||
my_hash = get_message_hash(str_request)
|
||||
|
||||
if my_hash not in str_request:
|
||||
print("Error: hash not match")
|
||||
return False
|
||||
if not verify_message_time(str_request):
|
||||
print("Error: timestamp expired")
|
||||
return False
|
||||
return True
|
||||
|
||||
def get_message_hash(json_command) -> str:
    """Isolate the "message" payload from the raw JSON text and hash it.

    The payload is extracted with string surgery: everything up to and
    including ``"message":`` is removed, then either the trailing
    ``,"hash":...`` or the outer closing brace is stripped.  The remainder
    is hashed with the shared HASHKEY via SHA-512.

    Parameters:
        json_command: The raw JSON command string.
    Returns:
        The hex hash of the message, or "" when no HASHKEY is configured.
    """
    if settings.HASHKEY is None:
        return ""
    message_part = re.sub(".*\"message\":", "", json_command, 1)
    if "\"hash\":" in message_part:
        message_part = re.sub(',\"hash\":.*', '', message_part)
    else:
        # No hash field: drop the closing brace of the outer object.
        message_part = message_part[:-1]
    return hash_calculator.calculate_hash(bytes(message_part, UTF8),
                                          settings.HASHKEY)
|
||||
|
||||
|
||||
def verify_message_time(json_command) -> bool:
    """Check that the message's embedded Linux timestamp has not expired.

    The expiry is the ``"timestamp"`` value plus settings.MAX_MESSAGE_AGE
    seconds; the message is valid while the current time is at or before
    that expiry.

    Parameters:
        json_command: The raw JSON command string.
    Returns:
        True when the message is still fresh, False when expired.
    """
    after_ts = re.sub('.*\"timestamp\":', "", json_command, 1)
    expiration = int(re.sub('}.*', '', after_ts)) + settings.MAX_MESSAGE_AGE
    return int(time.time()) <= expiration
|
170
src/aidgaf/aidgaf_server/server.py
Normal file
170
src/aidgaf/aidgaf_server/server.py
Normal file
@ -0,0 +1,170 @@
|
||||
""" aidgaf-server - A simple server to handle requests from the aidgaf client.
|
||||
This server will communicate with OpenAI to generate responses to the client."""
|
||||
from http.server import BaseHTTPRequestHandler, HTTPServer
|
||||
import json
|
||||
import threading
|
||||
import requests
|
||||
|
||||
import merge
|
||||
import idgaf
|
||||
import settings
|
||||
import security
|
||||
from const import UTF8, REPORTING_TIMEOUT
|
||||
|
||||
default_request_body = b'{"message":{"command":"aidgaf","data":{"username":"AdamOutler"}}}'
|
||||
request_headers = {}
|
||||
|
||||
|
||||
class IDGAFServer(BaseHTTPRequestHandler):
    """ Handles HTTP requests from the aidgaf client. """

    def do_GET(self):
        """ Reject GET requests with 418: only PATCH is supported. """
        self.send_response(418)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        # NOTE(review): body text says "HTTP 408" while the status sent is
        # 418 — confirm intent before changing the client-visible string.
        self.wfile.write(bytes("HTTP 408\nI'm a teapot", UTF8))
        # Bug fix: the original evaluated `self.close_connection` as a bare
        # expression (a no-op); assign True so the connection actually closes.
        self.close_connection = True

    def do_PATCH(self):
        """ Validate a PATCH request and dispatch its command. """
        body = self.get_body().decode(UTF8)
        check_result = perform_sanity_checks(body)
        if check_result:
            # Sanity checks failed; report the reason with 403.
            self.send_response(403)
            self.end_headers()
            self.wfile.write(bytes(check_result, UTF8))
            self.close_connection = True  # bug fix: was a no-op expression
            return
        command = json.loads(body)
        self.do_request_handling(command)

    def do_request_handling(self, command):
        """ Route the parsed command to its handler.

        Parameters:
            command: The command object received from the client. """
        print(command)
        if command['message']['command'] == 'aidgaf':
            self.idgaf_command_handler(command)

    def idgaf_command_handler(self, command):
        """ Answer an 'aidgaf' command, synchronously or via the async URL.

        Parameters:
            command: The command object received from the client. """
        if not settings.ASYNC_URL:
            [code, body] = self.getGPTResponse(command)
            self.handle_response(code, body)
            return

        # Async mode: acknowledge immediately, deliver the result later
        # from a worker thread.
        worker = threading.Thread(
            target=self.send_async_request, args=(command,))
        worker.start()
        self.handle_response(200, json.dumps(command))

    def send_async_request(self, command):
        """ Generate the response and push it to settings.ASYNC_URL.
        Runs on a worker thread so the main thread is not blocked.

        Parameters:
            command: The command object received from the client. """
        print("using async for response")
        [code, body] = self.getGPTResponse(command)
        merged = merge.merge_dict_no_overwrite(json.loads(body), command)
        merged['hash'] = security.get_message_hash(json.dumps(merged))
        merged['service'] = 'JavaShark'
        data = json.dumps(merged)
        print(str(code)+": "+data)

        if code != 200:
            return
        if settings.ASYNC_METHOD == "POST":
            requests.post(settings.ASYNC_URL, data=data, headers=request_headers,
                          timeout=REPORTING_TIMEOUT)
        elif settings.ASYNC_METHOD == "PUT":
            requests.put(settings.ASYNC_URL, data=data, headers=request_headers,
                         timeout=REPORTING_TIMEOUT)
        elif settings.ASYNC_METHOD == "PATCH":
            resp = requests.patch(settings.ASYNC_URL, data=data, headers=request_headers,
                                  timeout=REPORTING_TIMEOUT)
            print(str(resp)+resp.text)

    def getGPTResponse(self, command) -> [int, str]:
        """ Generate a response using OpenAI.

        Parameters:
            command: The command object received from the client.
        Returns:
            [response code, JSON response body]. """
        [responseCode, json_response] = idgaf.parse_idgaf_request(command)
        json_response['hash'] = security.get_message_hash(json.dumps(command))
        response_body = json.dumps(json_response)
        return [responseCode, response_body]

    def get_body(self) -> bytes:
        """ Read the request body, falling back to the default when absent.

        Returns:
            The body of the request as bytes.
        """
        # Bug fix: the original fetched and None-tested Content-Length twice
        # and left behind an unused decoded copy of the body.
        header_length = self.headers.get('Content-Length')
        request_body = default_request_body
        if header_length is not None:
            request_body = self.rfile.read(int(header_length))
        print(request_body)
        return request_body

    def handle_response(self, code, body):
        """ Send a response to the client and close the connection.

        Parameters:
            code: The HTTP response code.
            body: The body of the response. """
        self.send_response(code)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        print("sending:"+body)
        self.wfile.write(bytes(body, UTF8))
        self.close_connection = True  # bug fix: was a no-op expression
|
||||
|
||||
|
||||
def perform_sanity_checks(str_request) -> str:
    """ Check the message hash and timestamp when a HASHKEY is configured.

    (Fix: the original docstring claimed a True/False return; the function
    has always returned an error string, empty on success.)

    Parameters:
        str_request: The request body as a string.
    Returns:
        "" when the message is valid (or no HASHKEY is set); otherwise a
        short error description suitable for the client. """
    if settings.HASHKEY is not None:
        # Renamed local from `hash` to avoid shadowing the builtin.
        expected_hash = security.get_message_hash(str_request)
        if settings.HASHKEY and expected_hash not in str_request:
            print("Error: hash not match")
            return "Error: hash not match"
        if settings.HASHKEY and not security.verify_message_time(str_request):
            print("Error: timestamp expired")
            return "Error: timestamp expired"
    return ""
|
||||
|
||||
|
||||
def main():
    """ Create the HTTP server, serve until interrupted, then shut down. """
    web_server = HTTPServer(
        (settings.HOSTNAME, settings.SERVERPORT), IDGAFServer)
    print("Server started http://%s:%s" %
          (settings.HOSTNAME, settings.SERVERPORT))

    try:
        web_server.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C: fall through to the clean shutdown below.
        pass

    web_server.server_close()
    print("Server stopped.")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
""" This function starts the main method. """
|
||||
main()
|
49
src/aidgaf/aidgaf_server/settings.py
Normal file
49
src/aidgaf/aidgaf_server/settings.py
Normal file
@ -0,0 +1,49 @@
|
||||
"""this is the settings file for the server. It contains the settings for the server.
|
||||
"""
|
||||
import os
|
||||
from const import UTF8
|
||||
|
||||
""" The hostname of the server. """
|
||||
HOSTNAME: str = os.getenv('HOSTNAME') # localhost or some name
|
||||
|
||||
""" The API key for OpenAI"""
|
||||
APIKEY: str = os.getenv('APIKEY') # secret key from OpenAPI website
|
||||
if APIKEY is None:
|
||||
raise Exception("APIKEY Environmental Variable must be set")
|
||||
""" The port to broadcast the server """
|
||||
|
||||
"""The URL to send async requests to"""
|
||||
ASYNC_URL: str=os.getenv('ASYNC_URL')
|
||||
|
||||
""" The method to use for async requests. """
|
||||
ASYNC_METHOD: str=os.getenv('ASYNC_METHOD')
|
||||
|
||||
|
||||
"""8087 or the port you want to run on. pass in with docker -e command."""
|
||||
SERVERPORT: int = 8087
|
||||
|
||||
|
||||
""" The prompts used for OpenAI. When the server receives a request, it will
|
||||
randomly select one of these prompts to use."""
|
||||
PROMPTS = [
|
||||
"Say \"USERNAME does not give a fuck\" using 4 separate Haikus, and be sure to mention they are haikus before or after.",
|
||||
"Say \"USERNAME does not give a fuck\" within a 10 line Dr Suess poem." #,
|
||||
"Tell me a funny, impossible, story about USERNAME. Make USERNAME seem relatable at the end. Make up an outrageous situation where the moral of the story is: \"USERNAME does not give a fuck\" to this very day.",
|
||||
"Say \"USERNAME is completely apethetic and does not give a fuck\" in a verbose manner, using your most colorful words and one metaphor."
|
||||
]
|
||||
|
||||
""" The maximum number of tokens to use in a single OpenAI request. """
|
||||
OPEN_AI_MAX_TOKENS = 1000
|
||||
|
||||
""" The model to use for OpenAI. """
|
||||
OPEN_AI_COMPLETION_MODEL = "granite3-dense:2b"
|
||||
|
||||
""" The temperature to use for OpenAI. 0-2, 0 is basicall repeating the prompt, 2 is more random. """
|
||||
TEMPERATURE = 0.8
|
||||
|
||||
""" The hash key for the server. Leave this blank if you don't want to use it. """
|
||||
HASHKEY = bytes(os.getenv('HASHKEY') or "",UTF8) # shared secret for hmac of message
|
||||
if (HASHKEY == ""):
|
||||
HASHKEY=None
|
||||
""" The maximum age of a message in seconds. Only used if HASHKEY is set."""
|
||||
MAX_MESSAGE_AGE = 600
|
@ -1,10 +1,11 @@
|
||||
""" aidgaf-server setup.py """
|
||||
from setuptools import setup, find_namespace_packages
|
||||
|
||||
setup(
|
||||
name='aidgaf',
|
||||
version='1',
|
||||
description='',
|
||||
long_description='',
|
||||
version='0.1.0',
|
||||
description='OpenAI GPT Implementation of IDGAF.',
|
||||
long_description='Tells the user how much they don\'t GAF, using the power of OpenAI\'s GPT-3 API.',
|
||||
author='Adam Outler',
|
||||
author_email='adamoutler@gmail.com',
|
||||
license='IDGAF License',
|
||||
|
Reference in New Issue
Block a user