Compare commits


No commits in common. "b513b383980f8b8f11c23e9a46f906bc41af7165" and "31829592104f51ddd971c1b034845cfa06ab98ac" have entirely different histories.

11 changed files with 61 additions and 202 deletions

View File

@@ -13,10 +13,7 @@
"extensions": [
"pomdtr.secrets",
"ms-python.python",
"donjayamanne.python-extension-pack",
"ivory-lab.jenkinsfile-support",
"njpwerner.autodocstring",
"KevinRose.vsc-python-indent"
"donjayamanne.python-extension-pack"
]
}
}

Jenkinsfile vendored (69 lines)
View File

@@ -1,69 +0,0 @@
#! /bin/groovy
pipeline {
agent {
docker {
alwaysPull true
image 'alpine:3.14'
label 'Wrangler1'
args '-u root'
}
}
stages {
stage('Prepare Environment'){
steps{
sh "#!/bin/sh \n" +
'id; apk add docker openrc git'
}
}
stage('Obtain Source'){
steps {
git branch: 'main', url: 'https://git.adamoutler.com/aoutler/aidgaf-server.git'
}
}
stage('Build in docker') {
steps {
// Get some code from a Git repository
sh "#!/bin/sh \n" +
'docker build -t aidgaf .'
}
}
stage ("setup credentials"){
steps{
withCredentials([ sshUserPrivateKey(credentialsId: 'dockeruserOn192.168.1.115', keyFileVariable: 'sshkey', usernameVariable: 'user')]) {
sh "#!/bin/sh \n" +
'set +e; docker stop aidgaf-server||echo machine stopped; docker rm aidgaf-server||echo machine does not exist; set -e'
}
}
}
stage ('export docker container'){
steps {
sh "#!/bin/sh \n" +
'set +e; docker stop aidgaf-server||echo machine stopped; docker rm aidgaf-server||echo machine does not exist; set -e'
withCredentials([
string(credentialsId: 'OpenAI-API-Token', variable: 'OPEN_AI_TOKEN'),
string(credentialsId: 'PapaHashingSecret', variable: 'PAPA_HASH'),
sshUserPrivateKey(credentialsId: 'dockeruserOn192.168.1.115', keyFileVariable: 'sshkey', usernameVariable: 'user')
]) {
sh "#!/bin/sh \n" +
'mkdir -p ~/.ssh; cp "${sshkey}" ~/.ssh/id_rsa'
sh "#!/bin/sh \n" +
'docker run --name=aidgaf-server -eSERVERPORT=8087 -eHOSTNAME=0.0.0.0 -eHASHKEY="${PAPA_HASH}" -eAPIKEY="${OPEN_AI_TOKEN}" -p8087:8087 -d --restart=always aidgaf'
}
}
}
// To run Maven on a Windows agent, use
// bat "mvn -Dmaven.test.failure.ignore=true clean package"
}
}

View File

@@ -1,7 +1,3 @@
""" aidgaf-server. """
""" The name of the package"""
name = 'aidgaf'
""" The version of the package"""
version='0.1.0'
name = 'aidgaf'

View File

@@ -1,5 +1,3 @@
""" aidgaf-server entry point. """
import server
""" The main entry point for the aidgaf-server package."""
server.main()

View File

@@ -1,4 +1 @@
"""Constants for the aidgaf-server package."""
""" The encoding to use for strings. """
UTF8="utf-8"

View File

@@ -1,18 +1,6 @@
import hashlib
def calculate_hash(value:bytes, secret:bytes)->str:
""" This function calculates the hash of a value and a secret.
It is used to verify that the message is from the server. The hash
is calculated using the SHA512 algorithm. The hash is returned as a
hex string. This is not a secure hash, but it is good enough for
this application.
parameters:
value: The value to hash.
secret: The secret to hash with the value.
returns:
The hash of the value and secret. This is a hex string.
"""
m = hashlib.sha512()
m.update(b"".join([value,secret]))
return m.hexdigest()
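
For reference, a minimal usage sketch of the hashing scheme above: SHA-512 over the message bytes concatenated with the shared secret, returned as a hex string. The message body and secret below are placeholder values, not taken from the repository.

import hashlib

def calculate_hash(value: bytes, secret: bytes) -> str:
    # Same scheme as the function above: SHA-512 over value + secret, hex-encoded.
    m = hashlib.sha512()
    m.update(b"".join([value, secret]))
    return m.hexdigest()

# Placeholder inputs for illustration only.
message = b'{"command":"aidgaf","data":{"username":"AdamOutler"},"timestamp":1675725191}'
secret = b"example-shared-secret"
print(calculate_hash(message, secret))  # 128-character hex digest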

View File

@@ -1,6 +1,3 @@
""" This file contains the logic for the IDGAF server. It can be used to make a stand-alone
IDGAF server, or it can be used as a module in a larger application.
"""
import json
import random
import security
@@ -8,29 +5,26 @@ import re
import requests
from time import time
from datetime import datetime
import openai
import settings
""" The URL for the OpenAI API. """
openai.organization = "org-hNNV1yHjZp7T3pn5pdZWaKLm"
# print(openai.Model.list())
URL = "https://api.openai.com/v1/completions"
""" The data to send to OpenAI. """
DATA = {"model": settings.OPEN_AI_COMPLETION_MODEL,
"prompt": settings.PROMPTS[0],
"temperature": settings.TEMPERATURE,
"max_tokens": settings.OPEN_AI_MAX_TOKENS
}
""" The headers to send to OpenAI. """
request_headers = {"Authorization": "Bearer " +
settings.APIKEY, "Content-Type": "application/json"}
def get_response_base_object(text:str) -> dict:
""" This is used to create the response object for the server.
Parameters:
text: The text to return to the client.
Returns:
A dictionary containing the response object.
"""
def get_response_base_object(text):
resultObject = {}
resultObject["message"] = {}
resultObject["service"] = "AIDGAF Server"
@@ -39,16 +33,9 @@ def get_response_base_object(text:str) -> dict:
resultObject["timestamp"] = datetime.utcnow().timestamp()
return resultObject
def parse_idgaf_request(command)->[int, dict]:
""" This function handles the IDGAF command. It will return a response object.
Parameters:
command: The command object received from the client.
Returns:
A tuple containing the status code and the response object.
"""
def parse_idgaf_request(command):
the_data = get_prompt(command)
response = get_gpt_response(the_data)
response=get_gpt_response(the_data)
try:
response_text = response.json()['choices'][0]['text'].strip()
except (KeyError):
@@ -56,23 +43,12 @@ def parse_idgaf_request(command)->[int, dict]:
obj = get_response_base_object(response_text)
return [response.status_code, obj]
def get_gpt_response(data) -> requests.Response:
""" This function communicates with OpenAI and returns a response object.
Parameters:
data: The data to send to OpenAI.
Returns:
The response object from OpenAI.
"""
def get_gpt_response(data):
gpt_response = requests.post(URL, json=data, headers=request_headers)
return gpt_response
def get_prompt(command)->dict:
""" Selects a prompt from the PROMPTS list and replaces the USERNAME placeholder with the username.
Parameters:
command: The command object received from the client.
Returns:
A dictionary containing the data to send to OpenAI.
"""
def get_prompt(command):
my_prompt = random.choice(settings.PROMPTS)
my_prompt = my_prompt.replace(
"USERNAME", command['message']['data']['username'])
@@ -83,9 +59,35 @@ def get_prompt(command)->dict:
return the_data
def get_prompt(command):
my_prompt = random.choice(settings.PROMPTS)
my_prompt = my_prompt.replace(
"USERNAME", command['message']['data']['username'])
print("Prompt selected: "+my_prompt)
the_data = DATA
the_data["prompt"] = my_prompt
return the_data
value = '{"service":"papa","message":{"command":"aidgaf","data":{"username":"AdamOutler"},"timestamp":1675725191},"hash":"1bc73914478835d03f9ebdfb46328321d2bb656647e2876d6f162cc1860607fcfca8d825c48e390a6a254ee0835c8a4fe5f9a25795a3a0880ae5a23e9c132cf2"}'
if __name__ == "__main__":
""" This function is for testing the IDGAF capabilities without a server. """
value = '{"service":"papa","message":{"command":"aidgaf","data":{"username":"AdamOutler"},"timestamp":1675725191},"hash":"1bc73914478835d03f9ebdfb46328321d2bb656647e2876d6f162cc1860607fcfca8d825c48e390a6a254ee0835c8a4fe5f9a25795a3a0880ae5a23e9c132cf2"}'
command = json.loads(value)
[code, result] = parse_idgaf_request(command)
print(result)
# curl https://api.openai.com/v1/completions \
# -H "Content-Type: application/json" \
# -H "Authorization: Bearer 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" \
# -d '{"model": "text-curie-001",
# "prompt": "Say \"Adam does not give a fuck\" in a thoughtful and clever prose consisting of one to five paragraphs.",
# "temperature":1, "max_tokens": 500}'
# |jq -r .choices[0].text
# curl -X PATCH 127.0.0.1:8087 -d '{"message":{"command":"aidgaf","data":{"username":"AdamOutler"}}}'
# 2,500,000 tokens = $5

View File

@@ -1,18 +1,12 @@
""" Security functions for the server. """
import re
import time
import hash_calculator
import settings
from const import UTF8
def perform_hash_checks(str_request)->bool:
""" Performs a hash check on the message, and verifies the timestamp is valid.
If either check fails, return False. Otherwise, return True.
Parameters:
str_request: The request body, as a string.
Returns:
True if the message is valid, False otherwise."""
def perform_hash_checks(str_request):
hash = get_message_hash(str_request)
if hash not in str_request:
@@ -23,14 +17,9 @@ def perform_hash_checks(str_request)->bool:
return False
return True
def get_message_hash(json_command) -> str:
""" Get the object named "message", and run a hash over it. The hash is calculated
using the SHA512 algorithm. The hash is returned as a hex string.
Parameters:
json_command: The JSON command to hash.
Returns:
The hash of the message, as a string. """
"""Get the object named "message", and run a hash over it. """
strip1 = re.sub(".*\"message\":", "", json_command, 1)
if ("\"hash\":" in strip1):
strip2 = re.sub(',\"hash\":.*', '', strip1)
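
To make the stripping above concrete, here is a small sketch that applies the same two re.sub calls to a request body in the same shape as the sample payload elsewhere in this diff, then hashes the extracted message. The final hashing step is an assumption (the rest of get_message_hash falls outside this hunk), modeled on calculate_hash with a placeholder secret.

import hashlib
import re

# Sample body shaped like the test payload in the OpenAI module; secret is a placeholder.
raw = '{"service":"papa","message":{"command":"aidgaf","data":{"username":"AdamOutler"},"timestamp":1675725191},"hash":"abc123"}'
secret = b"example-shared-secret"

strip1 = re.sub(".*\"message\":", "", raw, 1)   # drop everything before the message object
strip2 = re.sub(',\"hash\":.*', '', strip1)     # drop the trailing hash field
# strip2 == '{"command":"aidgaf","data":{"username":"AdamOutler"},"timestamp":1675725191}'
digest = hashlib.sha512(strip2.encode("utf-8") + secret).hexdigest()  # assumed final step
print(digest)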

View File

@@ -1,5 +1,4 @@
""" aidgaf-server - A simple server to handle requests from the aidgaf client.
This server will communicate with OpenAI to generate responses to the client."""
import json
import random
@@ -13,17 +12,14 @@ default_request_body = b'{"message":{"command":"aidgaf","data":{"username":"Adam
class IDGAFServer(BaseHTTPRequestHandler):
""" This class handles the requests from the client. """
def do_GET(self):
""" This function handles GET requests. """
self.send_response(418)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes("", UTF8))
def do_PATCH(self):
""" This function handles PATCH requests. """
body = self.get_body().decode(UTF8)
if not perform_sanity_checks(body):
self.send_response(403)
@@ -32,9 +28,6 @@ class IDGAFServer(BaseHTTPRequestHandler):
self.do_request_handling(command)
def do_request_handling(self, command):
""" This function handles the request.
Parameters:
command: The command object received from the client. """
print(command)
if command['message']['command'] == 'aidgaf':
[responseCode, json_response] = idgaf.parse_idgaf_request(command)
@@ -42,11 +35,7 @@ def do_request_handling(self, command):
response_body = json.dumps(json_response)
self.handle_response(responseCode, response_body)
def get_body(self)->bytes:
""" This function returns the body of the request.
Returns:
The body of the request.
"""
def get_body(self):
header_length = self.headers.get('Content-Length')
request_body = default_request_body
if header_length != None and self.headers.get('Content-Length') != None:
@@ -58,23 +47,13 @@
def handle_response(self, code, body):
""" This function handles the response to the client.
Parameters:
code: The HTTP response code.
body: The body of the response. """
self.send_response(code)
self.send_header("Content-type", "text/html")
self.end_headers()
print("sending:"+body)
self.wfile.write(bytes(body, UTF8))
def perform_sanity_checks(str_request)->bool:
""" Performs a hash check on the message, and verifies the timestamp is valid.
If either check fails, return False. Otherwise, return True.
Parameters:
str_request: The request body as a string.
Returns:
True if the message is valid, False otherwise. """
def perform_sanity_checks(str_request):
if settings.HASHKEY is not None:
hash = security.get_message_hash(str_request)
if hash not in str_request:
@@ -89,7 +68,6 @@ def perform_sanity_checks(str_request)->bool:
def main():
""" This function starts the server. """
webServer = HTTPServer(
(settings.HOSTNAME, settings.SERVERPORT), IDGAFServer)
print("Server started http://%s:%s" %
@@ -105,5 +83,4 @@ def main():
if __name__ == "__main__":
""" This function starts the main method. """
main()
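
As a rough end-to-end illustration of how a client would call this server, here is a hedged sketch that builds a body in the same shape as the sample payload above, computes the hash over the serialized message plus a shared secret (mirroring hash_calculator), and sends it with PATCH. The host, port, and secret are placeholders.

import hashlib
import json
import time

import requests

SECRET = b"example-shared-secret"  # placeholder; must match the server's HASHKEY

message = {"command": "aidgaf",
           "data": {"username": "AdamOutler"},
           "timestamp": int(time.time())}
# Serialize once and splice the exact same string into the body, so the
# server's regex extraction hashes the same bytes the client hashed.
message_json = json.dumps(message, separators=(",", ":"))
digest = hashlib.sha512(message_json.encode("utf-8") + SECRET).hexdigest()
body = '{"service":"papa","message":%s,"hash":"%s"}' % (message_json, digest)

resp = requests.patch("http://127.0.0.1:8087", data=body)
print(resp.status_code, resp.text)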

View File

@@ -1,40 +1,25 @@
"""this is the settings file for the server. It contains the settings for the server.
"""
import os
from const import UTF8
""" The hostname of the server. """
# The hostname used by this app
HOSTNAME: str = os.getenv('HOSTNAME') # localhost or some name
""" The port to broadcast the server """
# The port to broadcast the server
# 8087 or the port you want to run on. pass in with docker -e command.
SERVERPORT: int = 8087
""" The API key for OpenAI"""
# The API key for OpenAI
APIKEY: str = os.getenv('APIKEY') # secret key from OpenAPI website
if APIKEY is None:
raise Exception("APIKEY Environmental Variable must be set")
# The hash key
HASHKEY = bytes(os.getenv('HASHKEY'),UTF8) # shared secret for hmac of message
""" The prompts used for OpenAI. When the server receives a request, it will
randomly select one of these prompts to use."""
# The prompts used for OpenAI.
PROMPTS = [
"Say \"USERNAME does not give a fuck\" as a haiku and mention that it is a haiku.",
"Say \"USERNAME does not give a fuck\" in a Dr Suess poem.",
"Tell me a story about how \"USERNAME does not give a fuck\" using an outrageous situation where someone should care but they do not and thats fine.",
"Say \"USERNAME is completely apethetic and does not give a fuck\" in a verbose manner, using your most colorful words and one metaphor."
]
""" The maximum number of tokens to use in a single OpenAI request. """
OPEN_AI_MAX_TOKENS = 500
""" The model to use for OpenAI. """
OPEN_AI_COMPLETION_MODEL = "text-davinci-003"
""" The temperature to use for OpenAI. 0-2, 0 is basicall repeating the prompt, 2 is more random. """
TEMPERATURE = 0.7
""" The hash key for the server. Leave this blank if you don't want to use it. """
HASHKEY = bytes(os.getenv('HASHKEY'),
UTF8) # shared secret for hmac of message
""" The maximum age of a message in seconds. Only used if HASHKEY is set."""
MAX_MESSAGE_AGE = 600
TEMPERATURE = 0.8
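
Because these values are read from the environment at import time, a quick sketch of how they might be supplied when running the server locally; every value here is a placeholder, and the variable names come from the os.getenv calls above.

import os

# Placeholders only; set real values in your environment or with docker -e.
os.environ["HOSTNAME"] = "0.0.0.0"
os.environ["APIKEY"] = "sk-your-openai-key"       # required, or importing settings raises
os.environ["HASHKEY"] = "example-shared-secret"   # shared secret for the message hash

import settings  # must happen after the environment is populated

print(settings.HOSTNAME, settings.SERVERPORT)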

View File

@@ -1,11 +1,10 @@
""" aidgaf-server setup.py """
from setuptools import setup, find_namespace_packages
setup(
name='aidgaf',
version='0.1.0',
description='OpenAI GPT Implementation of IDGAF.',
long_description='Tells the user how much they don\'t GAF, using the power of OpenAI\'s GPT-3 API.',
version='1',
description='',
long_description='',
author='Adam Outler',
author_email='adamoutler@gmail.com',
license='IDGAF License',