Compare commits

3 commits: 0.1.3 ... 0.1.6

SHA1        Message                          Date
d72676c7e0  Improve chat (#2)                2024-08-11 01:18:29 +02:00
            Co-authored-by: Myx <info@azaaxin.com>
b30d47e351  Add LLM info into main Readme    2024-06-19 12:33:19 +02:00
3ce0df7eaf  Added Ollama and Ollama Web UI   2024-06-19 12:21:05 +02:00
8 changed files with 79 additions and 5 deletions

File: ChatHandler.cs

@@ -12,9 +12,11 @@ namespace Lunaris2.Handler.ChatCommand
 {
     private readonly OllamaApiClient _ollama;
     private readonly Dictionary<ulong, Chat?> _chatContexts = new();
+    private readonly ChatSettings _chatSettings;

     public ChatHandler(IOptions<ChatSettings> chatSettings)
     {
+        _chatSettings = chatSettings.Value;
         var uri = new Uri(chatSettings.Value.Url);
         _ollama = new OllamaApiClient(uri)
@@ -29,6 +31,10 @@ namespace Lunaris2.Handler.ChatCommand
         _chatContexts.TryAdd(channelId, null);
         var userMessage = command.FilteredMessage;
+        var randomPersonality = _chatSettings.Personalities[new Random().Next(_chatSettings.Personalities.Count)];
+        userMessage = $"{randomPersonality.Instruction} {userMessage}";
+
         using var setTyping = command.Message.Channel.EnterTypingState();
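
Taken together, the two hunks read the configured personalities and prepend a randomly chosen instruction to the user's message before it reaches the model. A minimal, self-contained sketch of that selection step (Personality is the class added in ChatSettings.cs below; the empty-list guard and Random.Shared are additions the original diff does not have):

    using System;
    using System.Collections.Generic;

    public static class PersonalityPicker
    {
        // Mirrors the logic added to ChatHandler: pick a random personality and
        // prefix its instruction to the user's message. Unlike the diff, this
        // guards against a missing or empty Personalities list, and uses
        // Random.Shared instead of allocating a new Random per message.
        public static string Apply(IReadOnlyList<Personality> personalities, string userMessage)
        {
            if (personalities is null || personalities.Count == 0)
                return userMessage;

            var pick = personalities[Random.Shared.Next(personalities.Count)];
            return $"{pick.Instruction} {userMessage}";
        }
    }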

File: ChatSettings.cs

@@ -4,4 +4,11 @@ public class ChatSettings
 {
     public string Url { get; set; }
     public string Model { get; set; }
+    public List<Personality> Personalities { get; set; }
 }
+
+public class Personality
+{
+    public string Name { get; set; }
+    public string Instruction { get; set; }
+}

File: project file (.csproj)

@@ -16,6 +16,7 @@
     <PackageReference Include="Discord.Net.Rest" Version="3.13.1" />
     <PackageReference Include="MediatR" Version="12.2.0" />
     <PackageReference Include="Microsoft.Extensions.Configuration" Version="8.0.0" />
+    <PackageReference Include="Microsoft.Extensions.Configuration.Binder" Version="8.0.1" />
     <PackageReference Include="Microsoft.Extensions.Configuration.Json" Version="8.0.0" />
     <PackageReference Include="Microsoft.Extensions.DependencyInjection" Version="8.0.0" />
     <PackageReference Include="Microsoft.Extensions.Hosting" Version="8.0.0" />

File: Program.cs

@@ -42,7 +42,8 @@ public class Program
             .AddJsonFile("appsettings.json")
             .Build();
-        services.AddSingleton(client)
+        services
+            .AddSingleton(client)
             .AddSingleton(commands)
             .AddMediatR(configuration => configuration.RegisterServicesFromAssembly(Assembly.GetExecutingAssembly()))
             .AddSingleton<DiscordEventListener>()
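
The hunk itself is a compile fix: the statement services.AddSingleton(client) is split so the fluent registration chain forms one valid expression. For ChatHandler's IOptions<ChatSettings> parameter to resolve, the LLM section also has to be registered with the options system somewhere; that call is not shown in this compare, so the following line is an assumption about what it could look like:

    // Assumed wiring (not in this diff): bind the "LLM" section so that
    // IOptions<ChatSettings> can be injected into ChatHandler.
    services.Configure<ChatSettings>(configuration.GetSection("LLM"));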

File: Bot/appsettings.json

@@ -11,7 +11,13 @@
   "LavaLinkHostname": "127.0.0.1",
   "LavaLinkPort": 2333,
   "LLM": {
-    "Url": "http://192.168.50.54:11434",
-    "Model": "gemma"
+    "Url": "http://localhost:7869",
+    "Model": "gemma",
+    "personalities": [
+      {
+        "name": "Lunaris",
+        "instruction": "Let's role play. Your name is Lunaris and can be quite rude. You are supposed to answer on following question with a maximum of 2 sentences:"
+      }
+    ]
   }
 }

File: README.md

@@ -7,6 +7,7 @@ Lunaris2 is a Discord bot designed to play music in your server's voice channels
 - Play music from YouTube directly in your Discord server.
 - Skip tracks, pause, and resume playback.
 - Queue system to line up your favorite tracks.
+- Local LLM (AI chatbot) that answers @mentions in Discord chat. See more about it below.

 ## Setup
@@ -17,6 +18,11 @@ Lunaris2 is a Discord bot designed to play music in your server's voice channels
 5. Make sure you have Docker installed, then run ``start-services.sh`` (git-bash must be installed as well).
 6. Now you can start the project and run the application.

+## LLM
+Lunaris supports AI chat using a locally hosted large language model; Docker sets it up for you when you run the start-services script.
+The LLM runs on Ollama; see more about Ollama [here](https://ollama.com/). Running an LLM locally is resource-intensive: plan for at least 8GB of RAM. If you don't have enough RAM, select a lighter model in the [appsettings file](https://github.com/Myxelium/Lunaris2.0/blob/master/Bot/appsettings.json#L15).
+
 ## Usage
 - `/play <song>`: Plays the specified song in the voice channel you're currently in.
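
Regarding the RAM guidance in the README's LLM section: if 8GB is not available, smaller tags such as gemma:2b keep the footprint down. Once the container from the compose file below is running, a model can be pulled with, for example, docker exec ollama ollama pull gemma:2b, or through Ollama's REST API. A sketch of the latter (the /api/pull endpoint and its name field are from Ollama's API, the host port comes from the compose mapping below, and gemma:2b is only an example of a lighter model):

    using System;
    using System.Net.Http;
    using System.Text;
    using System.Threading.Tasks;

    class PullModel
    {
        static async Task Main()
        {
            using var http = new HttpClient { Timeout = TimeSpan.FromMinutes(30) };
            // Ollama streams pull progress as JSON lines; waiting for the
            // full response is enough for a one-off download.
            var body = new StringContent("{\"name\":\"gemma:2b\"}", Encoding.UTF8, "application/json");
            var response = await http.PostAsync("http://localhost:7869/api/pull", body);
            response.EnsureSuccessStatusCode();
            Console.WriteLine("Model pulled.");
        }
    }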

File: docker-compose.yml

@@ -24,7 +24,52 @@ services:
     ports:
       # you only need this if you want to make your lavalink accessible from outside of containers
       - "2333:2333"
+  ollama:
+    image: ollama/ollama:latest
+    ports:
+      - 7869:11434
+    volumes:
+      - .:/code
+      - ./ollama/ollama:/root/.ollama
+    container_name: ollama
+    pull_policy: always
+    tty: true
+    restart: always
+    environment:
+      - OLLAMA_KEEP_ALIVE=24h
+      - OLLAMA_HOST=0.0.0.0
+    networks:
+      - ollama-docker
+
+  ollama-webui:
+    image: ghcr.io/open-webui/open-webui:main
+    container_name: ollama-webui
+    volumes:
+      - ./ollama/ollama-webui:/app/backend/data
+    depends_on:
+      - ollama
+    ports:
+      - 8080:8080
+    environment: # https://docs.openwebui.com/getting-started/env-configuration#default_models
+      - OLLAMA_BASE_URLS=http://host.docker.internal:7869 #comma separated ollama hosts
+      - ENV=dev
+      - WEBUI_AUTH=False
+      - WEBUI_NAME=valiantlynx AI
+      - WEBUI_URL=http://localhost:8080
+      - WEBUI_SECRET_KEY=t0p-s3cr3t
+    extra_hosts:
+      - host.docker.internal:host-gateway
+    restart: unless-stopped
+    networks:
+      - ollama-docker
+
+volumes:
+  ollama: {}
+
 networks:
   # create a lavalink network you can add other containers to, to give them access to Lavalink
   lavalink:
-    name: lavalink
+    name: lavalink
+  ollama-docker:
+    external: false
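
The ollama service maps host port 7869 to the container's default 11434, which is why the appsettings.json diff above switches the bot's Url to http://localhost:7869. A quick sketch for verifying the mapping from the bot's side (/api/tags is Ollama's endpoint for listing locally available models):

    using System;
    using System.Net.Http;
    using System.Threading.Tasks;

    class OllamaHealthCheck
    {
        static async Task Main()
        {
            using var http = new HttpClient();
            // Any 200 response here means Ollama is reachable through the
            // 7869 -> 11434 port mapping defined in docker-compose.yml.
            var tags = await http.GetStringAsync("http://localhost:7869/api/tags");
            Console.WriteLine(tags);
        }
    }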

File: start-services.sh

@@ -1 +1,3 @@
-docker compose up -d
+docker compose up -d
+read -p "Press enter to continue"