Compare commits

...

22 Commits

SHA1 Message Date
1e2c77227b Update apikey variable, increase token len 2024-11-16 02:47:17 +00:00
0923f41428 Setup to use openwebui instead of openai 2024-11-16 02:16:20 +00:00
78ce7ab02a Update src/aidgaf/aidgaf_server/idgaf.py 2024-11-16 01:25:35 +00:00
33f89916de Update src/aidgaf/aidgaf_server/idgaf.py 2024-11-16 01:21:37 +00:00
d3872e4a62 Update src/aidgaf/aidgaf_server/settings.py 2024-11-16 01:16:20 +00:00
fc00639e72 Update src/aidgaf/aidgaf_server/idgaf.py 2024-11-16 00:57:42 +00:00
b5805c658f Update src/aidgaf/aidgaf_server/idgaf.py 2024-11-16 00:55:52 +00:00
3444bc50ad Update Dockerfile 2024-11-16 00:51:11 +00:00
1a91cbc8cc Update src/aidgaf/aidgaf_server/idgaf.py 2024-11-16 00:42:24 +00:00
a649fa87a4 Update Jenkinsfile 2024-11-16 00:26:14 +00:00
ce68767640 Update README.md 2024-09-24 14:04:08 +00:00
8c99b40b2a Update README.md 2024-09-24 14:03:07 +00:00
f1c7b195f3 Update HiREADME.md 2024-09-24 14:02:15 +00:00
b326642664 Update README.md 2024-09-24 13:58:01 +00:00
127892b5f2 Not for use in California 2024-09-24 13:57:17 +00:00
a4c63a7177 Update src/aidgaf/aidgaf_server/settings.py 2023-06-13 23:53:22 +00:00
a6e8e1a54f Update src/aidgaf/aidgaf_server/settings.py 2023-06-13 23:46:54 +00:00
f142ad562e Add options to allow response to external messages 2023-03-05 22:12:53 +00:00
086f3b6ede Update 'README.md' 2023-02-15 12:39:14 +00:00
18fb9c4679 Update 'README.md' 2023-02-15 12:38:10 +00:00
7ea946de88 remove Haiku 2023-02-15 05:12:09 +00:00
94f3c26b86 show logs on upgrade 2023-02-15 05:11:08 +00:00
6 changed files with 37 additions and 28 deletions


@@ -1,6 +1,7 @@
 {
   "secrets.enabledFolders": [
-    "aidgaf"
+    "aidgaf",
+    "default"
   ],
   "files.associations": {
     "[Jj]enkinsfile*": "groovy"

Dockerfile

@@ -15,7 +15,7 @@
 FROM alpine:latest
 RUN apk add python3 py3-pip \
-    && pip3 install openai\
+    && pip3 install openai requests\
     && mkdir /app
 COPY src/aidgaf /app/aidgaf
 EXPOSE 8087

Jenkinsfile vendored

@@ -4,7 +4,7 @@ pipeline {
     agent {
         docker {
             alwaysPull true
-            image 'alpine:3.14'
+            image 'docker.io/alpine:3.14'
             label 'Wrangler1'
             args '-u root'
         }
@@ -33,16 +33,14 @@ pipeline {
             steps {
                 withCredentials([ sshUserPrivateKey(credentialsId: 'dockeruserOn192.168.1.115', keyFileVariable: 'sshkey', usernameVariable: 'user')]) {
                     sh '#!/bin/sh \n' +
-                        'set +e; docker stop aidgaf-server||echo machine stopped; docker rm aidgaf-server||echo machine does not exist; set -e'
+                        'set +e; docker logs aidgaf-server; docker stop aidgaf-server||echo machine stopped; docker rm aidgaf-server||echo machine does not exist; set -e'
                 }
             }
         }
         stage('export docker container') {
             steps {
-                sh '#!/bin/sh \n' +
-                    'set +e; docker stop aidgaf-server||echo machine stopped; docker rm aidgaf-server||echo machine does not exist; set -e'
                 withCredentials([
-                    string(credentialsId: 'OpenAI-API-Token', variable: 'OPEN_AI_TOKEN'),
+                    string(credentialsId: 'ai-hacked-your-info-key', variable: 'OPENWEBUIAPIKEY'),
                     string(credentialsId: 'PapaHashingSecret', variable: 'PAPA_HASH'),
                     string(credentialsId: 'PapaAsyncUrl', variable: 'ASYNC_URL'),
                     sshUserPrivateKey(credentialsId: 'dockeruserOn192.168.1.115', keyFileVariable: 'sshkey')]) {
@@ -50,7 +48,7 @@ pipeline {
                     'mkdir -p ~/.ssh; cp "$sshkey" ~/.ssh/id_rsa'
                     sh '#!/bin/sh \n' +
                         /* groovylint-disable-next-line GStringExpressionWithinString */
-                        'docker run --name=aidgaf-server -eSERVERPORT=8087 -eHOSTNAME=0.0.0.0 -eHASHKEY="${PAPA_HASH}" -eAPIKEY="${OPEN_AI_TOKEN}" -eASYNC_METHOD="PATCH" -eASYNC_URL="${ASYNC_URL}" -p8087:8087 -d --restart=always aidgaf'
+                        'docker run --name=aidgaf-server -eSERVERPORT=8087 -eHOSTNAME=0.0.0.0 -eHASHKEY="${PAPA_HASH}" -eAPIKEY="${OPENWEBUIAPIKEY}" -eASYNC_METHOD="PATCH" -eASYNC_URL="${ASYNC_URL}" -p8087:8087 -d --restart=always aidgaf'
                 }
             }
         }
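
The deploy stage passes configuration to the container through -e flags. For context, a minimal sketch of how the server side presumably consumes them; only the HASHKEY lookup is confirmed by settings.py later in this compare, and the remaining variable names are assumptions inferred from the flags above:

```python
import os

UTF8 = "utf-8"

# Mirrors the HASHKEY lookup shown in settings.py below; the other names are
# assumptions based on the -e flags in the docker run command above.
SERVERPORT: int = int(os.getenv("SERVERPORT") or 8087)
HOSTNAME = os.getenv("HOSTNAME") or "0.0.0.0"
HASHKEY = bytes(os.getenv("HASHKEY") or "", UTF8)  # shared secret for hmac of message
APIKEY = os.getenv("APIKEY") or ""                 # OpenWebUI key (replaces the OpenAI token)
ASYNC_URL = os.getenv("ASYNC_URL") or ""
ASYNC_METHOD = os.getenv("ASYNC_METHOD") or "PATCH"
```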

README.md Normal file → Executable file

@@ -23,25 +23,28 @@ git clone https://git.adamoutler.com/aoutler/aidgaf-server
 The server accepts a message in the following format:
 ``` json
-{"service":"papa","message":{"command":"aidgaf","data":{"username":"AdamOutler"},"timestamp":1676231329}}
+{"message":{"command":"aidgaf","data":{"username":"AdamOutler"},"timestamp":1676231329}}
 ```
 ## Built With
 * [Docker](https://www.docker.com/)
 * [Visual Studio Code](https://code.visualstudio.com/)
 * [OpenAI](https://openai.com/)
 * [Python](https://www.python.org/)
+* [Automated builds by Jenkins](https://jenkins.adamoutler.com/blue/organizations/jenkins/Update%20IDGAF%20Server/activity)
 ## Contributing
 If you would like to contribute to this project, please fork the repository and submit a pull request.
 ## License
 Copyright 2023 Adam Outler
-Licensed under the I Dont Give A F License, Version 1.0 (the "License");
+Licensed under the I Dont Give A F License, Version 1.1 (the "License");
 you may not use this file except in compliance with the License.
-3. Send an email to idgaf@hackedyour.info if you find this helpful.
-Note: If you're wondering where number 1 and 2 are, IDGAF.
+rule 0. Not for use in California.
+rule 3. Send an email to idgaf@hackedyour.info if you find this helpful.
+Note: If you're wondering where numbers 1 and 2 are, IDGAF.
 ## Acknowledgments
 * This README was generated using OpenAI's language model, ChatGPT.
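
For quick testing, a request in the new message format could be posted like so. This is a hedged sketch: only the body shape and port 8087 come from this compare; the host, path, and whether the hmac "hash" field is required are assumptions.

```python
import time
import requests

# Body shape taken from the updated README; the "service" wrapper is gone.
payload = {
    "message": {
        "command": "aidgaf",
        "data": {"username": "AdamOutler"},
        "timestamp": int(time.time()),
    }
}

# Host and path are guesses; 8087 matches EXPOSE 8087 and SERVERPORT.
response = requests.post("http://localhost:8087/", json=payload, timeout=30)
print(response.status_code, response.text)
```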

src/aidgaf/aidgaf_server/idgaf.py

@@ -8,11 +8,11 @@ import requests
 import settings
 from const import OPENAI_TIMEOUT
-URL = "https://api.openai.com/v1/completions"
+URL = "https://ai.hackedyour.info/api/chat/completions"
 """ The URL for the OpenAI API. """
 DATA = {"model": settings.OPEN_AI_COMPLETION_MODEL,
-        "prompt": settings.PROMPTS[0],
+        "messages": [{"role":"system", "content":"You are AIDGAF server. You tell about how much people dont give a fuck"},{"role":"user", "content": settings.PROMPTS[0] }],
         "temperature": settings.TEMPERATURE,
         "max_tokens": settings.OPEN_AI_MAX_TOKENS
         }
@@ -49,9 +49,11 @@ def parse_idgaf_request(idgaf_command) -> [int, dict]:
     the_data = get_prompt(idgaf_command)
     response = get_gpt_response(the_data)
     try:
-        response_text = response.json()['choices'][0]['text'].strip()
+        response_text = response.json()['choices'][0]['message']["content"].strip()
     except KeyError:
         response_text = response.text
+    print(response_text)
     obj = get_response_base_object(response_text)
     return [response.status_code, obj]
@@ -76,13 +78,20 @@ def get_prompt(command) -> dict:
     Returns:
         A dictionary containing the data to send to OpenAI.
     """
+    replyTo=command['message']['data'].get('replyTo',"")
+    replyText=command['message']['data'].get('replyText',"")
+    inputText=command['message']['data'].get('inputText',"")
     my_prompt = random.choice(settings.PROMPTS)
     my_prompt = my_prompt.replace(
         "USERNAME", command['message']['data']['username'])
-    print("Prompt selected: "+my_prompt)
+    if replyTo:
+        my_prompt=replyTo +"said \""+replyText+".\"\n In response, "+my_prompt
+    if inputText:
+        my_prompt="With the following in mind: "+ command['message']['data']['username'] +" doesn't care about \""+inputText+"\".\n\n"+my_prompt
+    print(my_prompt)
     the_data = DATA
-    the_data["prompt"] = my_prompt
+    the_data["messages"][-1]["content"] = my_prompt
     return the_data
@@ -91,9 +100,7 @@ if __name__ == "__main__":
     INPUT = '''{"service":"papa","message":
         {"command":"aidgaf","data":{"username":"AdamOutler"},
         "timestamp":1675725191},
-        "hash":"1bc73914478835d03f9ebdfb46328321d2bb656647e28
-        76d6f162cc1860607fcfca8d825c48e390a6a254ee0835c8a4fe5f
-        9a25795a3a0880ae5a23e9c132cf2"}'''
+        "hash":"1bc73914478835d03f9ebdfb46328321d2bb656647e2876d6f162cc1860607fcfca8d825c48e390a6a254ee0835c8a4fe5f9a25795a3a0880ae5a23e9c132cf2"}'''
     test_command = json.loads(INPUT)
     [code, result] = parse_idgaf_request(test_command)
     print(result)
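
Taken together, these idgaf.py changes move the server from the legacy completions endpoint to an OpenWebUI-style chat endpoint. Below is a self-contained sketch of the new request/response cycle; the Authorization header and the APIKEY lookup are assumptions based on the -eAPIKEY flag in the Jenkinsfile, while the URL, payload shape, and response parsing mirror the diff.

```python
import os
import requests

OPENAI_TIMEOUT = 30  # stand-in for const.OPENAI_TIMEOUT; actual value not shown in this compare

URL = "https://ai.hackedyour.info/api/chat/completions"
DATA = {
    "model": "granite3-dense:2b",
    "messages": [
        {"role": "system", "content": "You are AIDGAF server. You tell about how much people dont give a fuck"},
        {"role": "user", "content": "Say \"USERNAME does not give a fuck\" using 4 separate Haikus, and be sure to mention they are haikus before or after."},
    ],
    "temperature": 0.8,
    "max_tokens": 1000,
}

# Bearer auth is an assumption; the diff only shows APIKEY being passed to the container.
headers = {"Authorization": f"Bearer {os.getenv('APIKEY', '')}"}
response = requests.post(URL, json=DATA, headers=headers, timeout=OPENAI_TIMEOUT)

# Chat-style responses nest the text under message.content instead of the old text field.
try:
    text = response.json()["choices"][0]["message"]["content"].strip()
except KeyError:
    text = response.text
print(text)
```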

src/aidgaf/aidgaf_server/settings.py

@@ -26,20 +26,20 @@ SERVERPORT: int = 8087
 """ The prompts used for OpenAI. When the server receives a request, it will
 randomly select one of these prompts to use."""
 PROMPTS = [
-    "Say \"USERNAME does not give a fuck\" as a haiku and mention that it is a haiku.",
-    "Say \"USERNAME does not give a fuck\" in a Dr Suess poem.",
-    "Tell me a funny, impossible, story about USERNAME. Make USERNAME seem relatable at the end. Make up an outrageous situation where the moral of the story is: \"USERNAME does not give a fuck\" to this very day."
+    "Say \"USERNAME does not give a fuck\" using 4 separate Haikus, and be sure to mention they are haikus before or after.",
+    "Say \"USERNAME does not give a fuck\" within a 10 line Dr Suess poem." #,
+    "Tell me a funny, impossible, story about USERNAME. Make USERNAME seem relatable at the end. Make up an outrageous situation where the moral of the story is: \"USERNAME does not give a fuck\" to this very day.",
     "Say \"USERNAME is completely apethetic and does not give a fuck\" in a verbose manner, using your most colorful words and one metaphor."
 ]
 """ The maximum number of tokens to use in a single OpenAI request. """
-OPEN_AI_MAX_TOKENS = 500
+OPEN_AI_MAX_TOKENS = 1000
 """ The model to use for OpenAI. """
-OPEN_AI_COMPLETION_MODEL = "text-davinci-003"
+OPEN_AI_COMPLETION_MODEL = "granite3-dense:2b"
 """ The temperature to use for OpenAI. 0-2, 0 is basicall repeating the prompt, 2 is more random. """
-TEMPERATURE = 0.7
+TEMPERATURE = 0.8
 """ The hash key for the server. Leave this blank if you don't want to use it. """
 HASHKEY = bytes(os.getenv('HASHKEY') or "",UTF8) # shared secret for hmac of message
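
These prompts feed get_prompt() in idgaf.py, which picks one at random and substitutes the caller's username. A minimal standalone illustration, with prompt text copied from the new list and the sample username from the README:

```python
import random

# Two entries from the new PROMPTS list in settings.py (abbreviated).
PROMPTS = [
    "Say \"USERNAME does not give a fuck\" using 4 separate Haikus, and be sure to mention they are haikus before or after.",
    "Say \"USERNAME does not give a fuck\" within a 10 line Dr Suess poem.",
]

def build_prompt(username: str) -> str:
    # Mirrors get_prompt(): choose a random template and substitute USERNAME.
    return random.choice(PROMPTS).replace("USERNAME", username)

print(build_prompt("AdamOutler"))
```

Note that in the new list the `#,` comments out the comma after the Dr Suess prompt, so Python's implicit string concatenation joins it with the story prompt into a single entry.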