Compare commits


3 Commits

| Author | SHA1 | Message | Date |
|--|--|--|--|
| Myx | ddf5bfdd3d | Working LLM chatbot | 2024-06-01 23:19:53 +02:00 |
| Myx | a703f7f9a2 | Clean | 2024-06-01 12:59:45 +02:00 |
| Myx | 87b35a2203 | Working chatbot | 2024-06-01 12:34:03 +02:00 |
11 changed files with 26 additions and 140 deletions

View File

@@ -7,13 +7,10 @@ on:
 jobs:
   build:
-    runs-on: ubuntu-latest
+    runs-on: windows-latest
     steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-        with:
-          fetch-depth: 0 # required for github-action-get-previous-tag
+      - uses: actions/checkout@v2
       - name: Setup .NET
         uses: actions/setup-dotnet@v1
@@ -30,19 +27,15 @@ jobs:
         run: dotnet publish ./Bot/Lunaris2.csproj --configuration Release --output ./out
       - name: Zip the build
-        run: 7z a -tzip ./out/Lunaris.zip ./out/*
+        run: 7z a -tzip ./out/Bot.zip ./out/*
-      - name: Get previous tag
-        id: previoustag
-        uses: 'WyriHaximus/github-action-get-previous-tag@v1'
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - name: Get the tag name
+        id: get_tag
+        run: echo "::set-output name=tag::${GITHUB_REF#refs/tags/}"
-      - name: Get next minor version
-        id: semver
-        uses: 'WyriHaximus/github-action-next-semvers@v1'
-        with:
-          version: ${{ steps.previoustag.outputs.tag }}
+      - name: Get the version
+        id: get_version
+        run: echo "::set-output name=version::$(date +%s).${{ github.run_id }}"
       - name: Create Release
         id: create_release
@@ -50,8 +43,8 @@ jobs:
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own token
         with:
-          tag_name: ${{ steps.semver.outputs.patch }}
-          release_name: Release ${{ steps.semver.outputs.patch }}
+          tag_name: ${{ steps.get_version.outputs.version }}
+          release_name: Release v${{ steps.get_version.outputs.version }}
           draft: false
           prerelease: false
@@ -62,6 +55,6 @@ jobs:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
           upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./out/Lunaris.zip
-          asset_name: Lunaris.zip
+          asset_path: ./out/Bot.zip
+          asset_name: Bot.zip
           asset_content_type: application/zip

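Note: the `::set-output` workflow command used in the added `get_tag`/`get_version` steps has since been deprecated by GitHub. On current runners the equivalent is writing to the step's output file, e.g. `echo "version=$(date +%s).${{ github.run_id }}" >> "$GITHUB_OUTPUT"`, which is read back the same way via `steps.get_version.outputs.version`.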
View File

@@ -12,11 +12,9 @@ namespace Lunaris2.Handler.ChatCommand
 {
     private readonly OllamaApiClient _ollama;
     private readonly Dictionary<ulong, Chat?> _chatContexts = new();
-    private readonly ChatSettings _chatSettings;
     public ChatHandler(IOptions<ChatSettings> chatSettings)
     {
-        _chatSettings = chatSettings.Value;
         var uri = new Uri(chatSettings.Value.Url);
         _ollama = new OllamaApiClient(uri)
@@ -31,10 +29,6 @@ namespace Lunaris2.Handler.ChatCommand
         _chatContexts.TryAdd(channelId, null);
         var userMessage = command.FilteredMessage;
-        var randomPersonality = _chatSettings.Personalities[new Random().Next(_chatSettings.Personalities.Count)];
-        userMessage = $"{randomPersonality.Instruction} {userMessage}";
         using var setTyping = command.Message.Channel.EnterTypingState();

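For reference, the deleted personality logic boils down to prefixing the user's prompt with a randomly chosen instruction. A minimal standalone sketch of that pattern (types reduced to what the diff shows; `Random.Shared` substituted for `new Random()`):
```c#
using System;
using System.Collections.Generic;

public record Personality(string Name, string Instruction);

public static class PromptBuilder
{
    // Mirrors the removed lines: pick a random personality and
    // prepend its instruction to the user's message.
    public static string Apply(IReadOnlyList<Personality> personalities, string userMessage)
    {
        var personality = personalities[Random.Shared.Next(personalities.Count)];
        return $"{personality.Instruction} {userMessage}";
    }
}
```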
View File

@@ -4,11 +4,4 @@ public class ChatSettings
 {
     public string Url { get; set; }
     public string Model { get; set; }
-    public List<Personality> Personalities { get; set; }
 }
-public class Personality
-{
-    public string Name { get; set; }
-    public string Instruction { get; set; }
-}

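For context, `ChatSettings` (and the `Personality` list being removed) is bound from the `LLM` section of `appsettings.json` shown further down. A minimal sketch of how such a binding is typically registered with the options pattern; the exact registration in `Program.cs` is not part of this diff, so treat this as an assumption:
```c#
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;

var configuration = new ConfigurationBuilder()
    .AddJsonFile("appsettings.json")
    .Build();

var services = new ServiceCollection();
// Binds LLM:Url, LLM:Model and LLM:Personalities onto ChatSettings,
// making IOptions<ChatSettings> resolvable by ChatHandler's constructor.
services.Configure<ChatSettings>(configuration.GetSection("LLM"));
```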
View File

@@ -1,8 +0,0 @@
-## Ollama - Large Language Model Chat - Handler
-This handler "owns" the logic for accessing the Ollama API, which runs the transformer model.
-> To get started with a local chat bot, see: [Run LLMs Locally using Ollama](https://marccodess.medium.com/run-llms-locally-using-ollama-8f04dd9b14f9)
-If the Ollama server runs on a different machine than the bot, configure it to accept connections from other machines on the network; this is not required when the bot reaches Ollama on localhost.
-See: [How do I configure Ollama server?](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-do-i-configure-ollama-server)

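The FAQ entry above boils down to setting `OLLAMA_HOST=0.0.0.0` so Ollama listens on all interfaces (the docker-compose file at the bottom of this diff does exactly that). A quick way to verify that the bot's machine can reach the server: Ollama answers with the plain text `Ollama is running` on its base URL. The URL below assumes the old `localhost:7869` port mapping:
```c#
using System;
using System.Net.Http;
using System.Threading.Tasks;

class OllamaPing
{
    static async Task Main()
    {
        using var http = new HttpClient();
        // Ollama's root endpoint returns "Ollama is running" when reachable.
        Console.WriteLine(await http.GetStringAsync("http://localhost:7869/"));
    }
}
```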
View File

@@ -16,7 +16,6 @@
<PackageReference Include="Discord.Net.Rest" Version="3.13.1" />
<PackageReference Include="MediatR" Version="12.2.0" />
<PackageReference Include="Microsoft.Extensions.Configuration" Version="8.0.0" />
<PackageReference Include="Microsoft.Extensions.Configuration.Binder" Version="8.0.1" />
<PackageReference Include="Microsoft.Extensions.Configuration.Json" Version="8.0.0" />
<PackageReference Include="Microsoft.Extensions.DependencyInjection" Version="8.0.0" />
<PackageReference Include="Microsoft.Extensions.Hosting" Version="8.0.0" />

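Dropping the direct `Microsoft.Extensions.Configuration.Binder` reference lines up with trimming the settings classes above: that package supplies the reflection-based binder behind `IConfiguration.Bind()`/`Get<T>()`, and it can still arrive transitively through the other Microsoft.Extensions packages where needed.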
View File

@@ -42,8 +42,7 @@ public class Program
     .AddJsonFile("appsettings.json")
     .Build();
-services
-    .AddSingleton(client)
+services.AddSingleton(client)
     .AddSingleton(commands)
     .AddMediatR(configuration => configuration.RegisterServicesFromAssembly(Assembly.GetExecutingAssembly()))
     .AddSingleton<DiscordEventListener>()

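The `AddMediatR(... RegisterServicesFromAssembly ...)` call above discovers notification handlers by scanning the assembly. A minimal sketch of the pair it wires up, using names from the architecture docs below (the notification's payload type is assumed here, since the real definition is not part of this diff):
```c#
using System.Threading;
using System.Threading.Tasks;
using MediatR;

// Published by DiscordEventListener for every incoming Discord message.
public record MessageReceivedNotification(string Content) : INotification;

// Picked up automatically by RegisterServicesFromAssembly.
public class MessageReceivedHandler : INotificationHandler<MessageReceivedNotification>
{
    public Task Handle(MessageReceivedNotification notification, CancellationToken cancellationToken)
    {
        // Route the message to the matching command handler here.
        return Task.CompletedTask;
    }
}
```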
View File

@@ -2,14 +2,9 @@
 ```mermaid
 flowchart TD
     Program[Program] -->|Register| EventListener
-    EventListener[DiscordEventListener] --> A[MessageReceivedHandler]
+    EventListener[DiscordEventListener] --> A
     EventListener[DiscordEventListener] --> A2[SlashCommandReceivedHandler]
-    A --> |Message| f{If bot is mentioned}
-    f --> |ChatCommand| v[ChatHandler]
-    A2[SlashCommandReceivedHandler] -->|Message| C{Send to correct command by
+    A[MessageReceivedHandler] -->|Message| C{Send to correct command by
     looking at commandName}
     C -->|JoinCommand| D[JoinHandler]
@@ -17,33 +12,9 @@ flowchart TD
     C -->|HelloCommand| F[HelloHandler]
     C -->|GoodbyeCommand| G[GoodbyeHandler]
 ```
-Program registers an event listener ```DiscordEventListener``` which publishes a message:
-```c#
-await Mediator.Publish(new MessageReceivedNotification(arg), _cancellationToken);
-```
-| Name | Description |
-|--|--|
-| SlashCommandReceivedHandler | Handles commands issued with ``/`` from any Discord Guild/Server. |
-| MessageReceivedHandler | Listens to **all** messages. |
-## Handler integrations
-```mermaid
-flowchart TD
-    D[JoinHandler] --> Disc[Discord Api]
-    E[PlayHandler] --> Disc[Discord Api]
-    F[HelloHandler] --> Disc[Discord Api]
-    G[GoodbyeHandler] --> Disc[Discord Api]
-    v[ChatHandler] --> Disc[Discord Api]
-    v --> o[Ollama Server]
-    o --> v
-    E --> Lava[Lavalink]
-```
-| Name | Description |
-|--|--|
-| JoinHandler | Handles the logic for **just** joining a voice channel. |
-| PlayHandler | Handles the logic for joining and playing music in a voice channel. |
-| HelloHandler | Responds with Hello. (Dummy handler, will be removed.) |
-| GoodbyeHandler | Responds with Goodbye. (Dummy handler, will be removed.) |
-| ChatHandler | Handles the logic for LLM chat with the user. |

View File

@@ -9,15 +9,9 @@
"Token": "discordToken",
"LavaLinkPassword": "youshallnotpass",
"LavaLinkHostname": "127.0.0.1",
"LavaLinkPort": 2333,
"LavaLinkPort": 2333
"LLM": {
"Url": "http://localhost:7869",
"Model": "gemma",
"personalities": [
{
"name": "Lunaris",
"instruction": "Let's role play. Your name is Lunaris and can be quite rude. You are supposed to answer on following question with a maximum of 2 sentences:"
}
]
"Url": "http://192.168.50.54:11434",
"Model": "gemma"
}
}

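Two details worth noting in the old settings: the lowercase `"personalities"` key still binds to the `Personalities` property because .NET configuration binding is case-insensitive, and the old `http://localhost:7869` URL matches the `7869:11434` port mapping in the docker-compose file below, while the new URL points straight at an Ollama instance on another host at the default port 11434.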
View File

@@ -7,7 +7,6 @@ Lunaris2 is a Discord bot designed to play music in your server's voice channels
 - Play music from YouTube directly in your Discord server.
 - Skip tracks, pause, and resume playback.
 - Queue system to line up your favorite tracks.
-- Local LLM (AI chatbot) that answers @mentions in Discord chat. See more about it below.
 ## Setup
@@ -18,11 +17,6 @@ Lunaris2 is a Discord bot designed to play music in your server's voice channels
 5. Make sure you have Docker installed, then run ``start-services.sh`` (Git Bash is required to run it).
 6. Now you can start the project and run the application.
-## LLM
-Lunaris supports AI chat backed by a large language model. The LLM is hosted locally; Docker sets it up for you when you run the start-services script.
-The LLM runs on Ollama (read more about Ollama [here](https://ollama.com/)). Running an LLM locally is resource-intensive: you need at least 8 GB of RAM. If you don't have enough RAM, pick an LLM model in the [appsettings file](https://github.com/Myxelium/Lunaris2.0/blob/master/Bot/appsettings.json#L15) that demands less of your system.
 ## Usage
 - `/play <song>`: Plays the specified song in the voice channel you're currently in.
@@ -31,3 +25,7 @@ The LLM runs on Ollama (read more about Ollama [here](https://ollama.com/)). R
+## Contributing
+Pull requests are welcome. For major changes, please open an issue first to discuss what you would like to change.
 ## License
 [MIT](https://choosealicense.com/licenses/mit/)

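With Ollama, switching to a lighter model means pulling it first (for example `ollama pull gemma:2b`; available tags depend on the Ollama model library) and setting the matching name in the `"Model"` field of appsettings.json.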
View File

@@ -24,52 +24,7 @@ services:
     ports:
       # you only need this if you want to make your lavalink accessible from outside of containers
       - "2333:2333"
-  ollama:
-    image: ollama/ollama:latest
-    ports:
-      - 7869:11434
-    volumes:
-      - .:/code
-      - ./ollama/ollama:/root/.ollama
-    container_name: ollama
-    pull_policy: always
-    tty: true
-    restart: always
-    environment:
-      - OLLAMA_KEEP_ALIVE=24h
-      - OLLAMA_HOST=0.0.0.0
-    networks:
-      - ollama-docker
-  ollama-webui:
-    image: ghcr.io/open-webui/open-webui:main
-    container_name: ollama-webui
-    volumes:
-      - ./ollama/ollama-webui:/app/backend/data
-    depends_on:
-      - ollama
-    ports:
-      - 8080:8080
-    environment: # https://docs.openwebui.com/getting-started/env-configuration#default_models
-      - OLLAMA_BASE_URLS=http://host.docker.internal:7869 #comma separated ollama hosts
-      - ENV=dev
-      - WEBUI_AUTH=False
-      - WEBUI_NAME=valiantlynx AI
-      - WEBUI_URL=http://localhost:8080
-      - WEBUI_SECRET_KEY=t0p-s3cr3t
-    extra_hosts:
-      - host.docker.internal:host-gateway
-    restart: unless-stopped
-    networks:
-      - ollama-docker
-volumes:
-  ollama: {}
 networks:
   # create a lavalink network you can add other containers to, to give them access to Lavalink
   lavalink:
     name: lavalink
-  ollama-docker:
-    external: false
-    name: lavalink

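In the removed `ollama` service, `7869:11434` publishes Ollama's default container port 11434 on host port 7869, which is why the deleted appsettings entry pointed at `http://localhost:7869`; `OLLAMA_HOST=0.0.0.0` makes Ollama listen on all interfaces inside the container, per the Ollama FAQ linked earlier.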
View File

@@ -1,3 +1 @@
-docker compose up -d
-read -p "Press enter to continue"
+docker compose up -d