This commit is contained in:
xXCryingLaughingXx
2026-02-17 20:24:30 -06:00
committed by GitHub
parent f701cae171
commit 30d9f48d2e
14 changed files with 2058 additions and 1 deletion

View File

@@ -17,7 +17,7 @@ internal class BotCommands
{
private ChatBot _bot;
private Logger _logger = LogManager.GetCurrentClassLogger();
private char CommandPrefix = '!';
private char CommandPrefix = '!';
private IEnumerable<ICommand> Commands;
private CancellationToken _cancellationToken;

View File

@@ -108,6 +108,7 @@ public class BotServices
_logger.Info("Starting websocket watchdog and Howl.gg user stats timer");
_websocketWatchdog = WebsocketWatchdog();
_howlggGetUserTimer = HowlggGetUserTimer();
ConversationContextManager.StartCleanupTimer(_cancellationToken);
}
private async Task BuildKasinoRain()

View File

@@ -0,0 +1,182 @@
using System.Collections.Concurrent;
using KfChatDotNetBot.Settings;
using NLog;
namespace KfChatDotNetBot.Services;
/// <summary>
/// One entry in a conversation history: who said it and what was said.
/// </summary>
public class ConversationMessage
{
    /// <summary>Author role, e.g. "user", "assistant" or "system".</summary>
    public string Role { get; set; } = string.Empty;
    /// <summary>The message text.</summary>
    public string Content { get; set; } = string.Empty;
}
/// <summary>
/// Mutable per-key conversation state: the retained messages, an optional
/// rolling summary of older messages, a rough token estimate, the last
/// activity timestamp (used for expiry) and an optional assigned mood.
/// </summary>
public class ConversationContext
{
    public List<ConversationMessage> Messages { get; set; } = [];
    public string? Summary { get; set; }
    public int EstimatedTokenCount { get; set; }
    public DateTime LastActivity { get; set; } = DateTime.UtcNow;
    public string? Mood { get; set; }
    /// <summary>
    /// Refreshes <see cref="EstimatedTokenCount"/> using the ~4 characters
    /// per token heuristic: the summary (when present) counts at length/4,
    /// and each message counts at length/4 plus a flat 4-token overhead for
    /// its role/framing.
    /// </summary>
    public void RecalculateTokens()
    {
        var summaryTokens = (Summary?.Length ?? 0) / 4;
        var messageTokens = Messages.Sum(m => m.Content.Length / 4 + 4);
        EstimatedTokenCount = summaryTokens + messageTokens;
    }
}
/// <summary>
/// Tracks conversation state (messages, summary, mood) for the AI chat
/// feature. Contexts are held in a process-wide concurrent dictionary keyed
/// per chatter ("user:{id}") or per room ("room:{id}") and expired by a
/// background cleanup loop started via <see cref="StartCleanupTimer"/>.
/// </summary>
public static class ConversationContextManager
{
    private static readonly Logger Logger = LogManager.GetCurrentClassLogger();
    private static readonly ConcurrentDictionary<string, ConversationContext> Contexts = new();
    private static Task? _cleanupTask;
    private static CancellationToken _cancellationToken;
    /// <summary>
    /// Starts the background expiry loop. Intended to be called once at
    /// service startup; the loop runs until the token is cancelled.
    /// </summary>
    public static void StartCleanupTimer(CancellationToken cancellationToken)
    {
        _cancellationToken = cancellationToken;
        _cleanupTask = CleanupLoop();
    }
    /// <summary>
    /// Maps a context-sharing mode to the dictionary key for a user/room
    /// pair. Unknown modes fall back to per-chatter scoping.
    /// </summary>
    public static string GetContextKey(string mode, int userId, int roomId)
    {
        return mode.ToLowerInvariant() switch
        {
            "perchatter" => $"user:{userId}",
            "perroom" => $"room:{roomId}",
            _ => $"user:{userId}" // fallback to per-chatter
        };
    }
    /// <summary>
    /// Returns the mood for this context, assigning a random one on first
    /// use. NOTE(review): the null check and assignment are not atomic, so
    /// two concurrent callers could race; last write wins, which appears
    /// harmless here — confirm if mood stability matters.
    /// </summary>
    public static string GetOrAssignMood(string contextKey)
    {
        var context = Contexts.GetOrAdd(contextKey, _ => new ConversationContext());
        if (context.Mood == null)
        {
            context.Mood = Commands.NoraMoods.GetRandomMood();
            Logger.Debug($"Assigned mood for {contextKey}: {context.Mood}");
        }
        return context.Mood;
    }
    /// <summary>
    /// Appends a message to the context (creating it if needed), refreshing
    /// its activity timestamp and token estimate.
    /// </summary>
    public static void AddMessage(string contextKey, string role, string content)
    {
        var context = Contexts.GetOrAdd(contextKey, _ => new ConversationContext());
        context.Messages.Add(new ConversationMessage { Role = role, Content = content });
        context.LastActivity = DateTime.UtcNow;
        context.RecalculateTokens();
    }
    /// <summary>
    /// Builds the message list to send to the API: the rolling summary (as a
    /// system message) followed by the retained messages. Returns an empty
    /// list when no context exists for the key.
    /// </summary>
    public static List<ConversationMessage> GetMessagesForApi(string contextKey)
    {
        if (!Contexts.TryGetValue(contextKey, out var context))
            return [];
        var messages = new List<ConversationMessage>();
        if (context.Summary != null)
        {
            messages.Add(new ConversationMessage
            {
                Role = "system",
                Content = $"Previous conversation summary: {context.Summary}"
            });
        }
        messages.AddRange(context.Messages);
        return messages;
    }
    /// <summary>
    /// If the context exceeds its configured token budget, summarises all but
    /// the last two messages via the Grok API and replaces them with the
    /// summary. On API failure the oldest messages are dropped instead so the
    /// context still shrinks.
    /// </summary>
    public static async Task CompactIfNeededAsync(string contextKey)
    {
        if (!Contexts.TryGetValue(contextKey, out var context))
            return;
        var maxTokensSetting = await SettingsProvider.GetValueAsync(BuiltIn.Keys.GrokNoraContextMaxTokens);
        var maxTokens = int.TryParse(maxTokensSetting.Value, out var mt) ? mt : 800;
        if (context.EstimatedTokenCount <= maxTokens)
            return;
        // Need at least 3 messages to compact (keep last 2, summarize the rest)
        if (context.Messages.Count < 3)
            return;
        Logger.Info($"Compacting context for {contextKey}: {context.EstimatedTokenCount} tokens > {maxTokens} limit");
        // Keep the last 2 messages, summarize everything else
        var keepCount = 2;
        var toSummarize = context.Messages.Take(context.Messages.Count - keepCount).ToList();
        var toKeep = context.Messages.Skip(context.Messages.Count - keepCount).ToList();
        // Build the text to summarize, folding in any previous summary so
        // older details survive repeated compactions
        var summaryInput = "";
        if (context.Summary != null)
            summaryInput = $"Previous summary: {context.Summary}\n\n";
        summaryInput += string.Join("\n",
            toSummarize.Select(m => $"{m.Role}: {m.Content}"));
        var summary = await GrokApi.GetChatCompletionAsync(
            "Summarize this conversation in 2-3 concise sentences. Capture the key topics and any important details the user mentioned.",
            summaryInput,
            maxTokens: 150);
        if (summary != null)
        {
            context.Summary = summary;
            context.Messages = toKeep;
            context.RecalculateTokens();
            Logger.Info($"Compacted context for {contextKey}: now {context.EstimatedTokenCount} tokens");
        }
        else
        {
            // Compaction failed — just drop the oldest messages to stay under budget
            Logger.Warn($"Compaction API call failed for {contextKey}, dropping oldest messages instead");
            context.Messages = toKeep;
            context.RecalculateTokens();
        }
    }
    /// <summary>Removes a context entirely; returns true if one existed.</summary>
    public static bool ClearContext(string contextKey)
    {
        return Contexts.TryRemove(contextKey, out _);
    }
    /// <summary>
    /// Runs <see cref="CleanupExpired"/> every 5 minutes until cancellation.
    /// Exceptions from a single sweep are logged and do not kill the loop.
    /// </summary>
    private static async Task CleanupLoop()
    {
        using var timer = new PeriodicTimer(TimeSpan.FromMinutes(5));
        try
        {
            while (await timer.WaitForNextTickAsync(_cancellationToken))
            {
                try
                {
                    await CleanupExpired();
                }
                catch (Exception ex)
                {
                    Logger.Error(ex, "Error during conversation context cleanup");
                }
            }
        }
        catch (OperationCanceledException)
        {
            // Fix: WaitForNextTickAsync throws OperationCanceledException when
            // the token is cancelled at shutdown. Swallow it here so the
            // fire-and-forget _cleanupTask completes cleanly instead of
            // faulting with an unobserved exception.
            Logger.Debug("Conversation context cleanup loop cancelled");
        }
    }
    /// <summary>
    /// Removes contexts whose last activity is older than the configured
    /// expiry window (GrokNoraContextExpiryMinutes, default 30 minutes).
    /// </summary>
    private static async Task CleanupExpired()
    {
        var expirySetting = await SettingsProvider.GetValueAsync(BuiltIn.Keys.GrokNoraContextExpiryMinutes);
        var expiryMinutes = int.TryParse(expirySetting.Value, out var em) ? em : 30;
        var cutoff = DateTime.UtcNow.AddMinutes(-expiryMinutes);
        var expired = Contexts.Where(kvp => kvp.Value.LastActivity < cutoff).Select(kvp => kvp.Key).ToList();
        foreach (var key in expired)
        {
            Contexts.TryRemove(key, out _);
            Logger.Debug($"Expired conversation context: {key}");
        }
        if (expired.Count > 0)
            Logger.Info($"Cleaned up {expired.Count} expired conversation contexts");
    }
}

View File

@@ -0,0 +1,197 @@
using System.Net;
using System.Net.Http.Headers;
using System.Text;
using System.Text.Json;
using System.Text.Json.Serialization;
using KfChatDotNetBot.Settings;
using NLog;
namespace KfChatDotNetBot.Services;
/// <summary>
/// Grok AI (xAI) API integration for chat completions.
///
/// This service integrates with xAI's Grok models to provide AI-powered responses
/// for the !nora command. Grok uses an OpenAI-compatible API format.
///
/// API Documentation: https://docs.x.ai/api
/// API Endpoint: https://api.x.ai/v1/chat/completions
/// Pricing: ~$5 per 1M input tokens for grok-4-1-fast-reasoning
/// Console: https://console.x.ai/
///
/// Features:
/// - OpenAI-compatible chat completion format
/// - Configurable model (grok-4-1-fast-reasoning, grok-2-latest, etc.)
/// - Customizable system prompt for personality
/// - Response length limited to 200 tokens for chat brevity
///
/// Configuration:
/// - Grok.ApiKey: Your xAI API key (required)
/// - Grok.Chat.Endpoint: API endpoint (optional override)
/// - Grok.Nora.Model: Model to use (default: grok-4-1-fast-reasoning)
/// - Grok.Nora.SystemPrompt: Personality/instructions for Nora
/// - Proxy: Global proxy setting (optional)
/// </summary>
/// <summary>
/// Grok AI (xAI) API integration for chat completions.
///
/// Grok exposes an OpenAI-compatible chat completion API which this service
/// uses to produce AI-powered responses for the !nora command.
///
/// API Documentation: https://docs.x.ai/api
/// API Endpoint: https://api.x.ai/v1/chat/completions
/// Console: https://console.x.ai/
///
/// Configuration:
/// - Grok.ApiKey: Your xAI API key (required)
/// - Grok.Chat.Endpoint: API endpoint (optional override)
/// - Grok.Nora.Model: Model to use (default: grok-4-1-fast-reasoning)
/// - Grok.Nora.SystemPrompt: Personality/instructions for Nora
/// - Proxy: Global proxy setting (optional)
/// </summary>
public static class GrokApi
{
    private static readonly Logger Logger = LogManager.GetCurrentClassLogger();
    /// <summary>
    /// Chat completion response from the Grok API (OpenAI-compatible format).
    /// </summary>
    class ChatCompletionResponse
    {
        [JsonPropertyName("id")] public string Id { get; set; } = string.Empty;
        [JsonPropertyName("object")] public string Object { get; set; } = string.Empty;
        [JsonPropertyName("created")] public long Created { get; set; }
        [JsonPropertyName("model")] public string Model { get; set; } = string.Empty;
        [JsonPropertyName("choices")] public List<ChatChoice> Choices { get; set; } = new();
        [JsonPropertyName("usage")] public Usage Usage { get; set; } = new();
    }
    class ChatChoice
    {
        [JsonPropertyName("index")] public int Index { get; set; }
        [JsonPropertyName("message")] public ChatMessage Message { get; set; } = new();
        [JsonPropertyName("finish_reason")] public string FinishReason { get; set; } = string.Empty;
    }
    class ChatMessage
    {
        [JsonPropertyName("role")] public string Role { get; set; } = string.Empty;
        [JsonPropertyName("content")] public string Content { get; set; } = string.Empty;
    }
    class Usage
    {
        [JsonPropertyName("prompt_tokens")] public int PromptTokens { get; set; }
        [JsonPropertyName("completion_tokens")] public int CompletionTokens { get; set; }
        [JsonPropertyName("total_tokens")] public int TotalTokens { get; set; }
    }
    /// <summary>
    /// Convenience overload for a single-turn exchange: wraps the user
    /// message in a one-element conversation and delegates to the
    /// conversation-history overload.
    /// </summary>
    /// <param name="systemPrompt">Instructions for the AI (personality, constraints, etc.)</param>
    /// <param name="userMessage">The user's question/message</param>
    /// <param name="model">Optional model override (uses Grok.Nora.Model from settings if null)</param>
    /// <param name="maxTokens">Maximum response tokens (default 300)</param>
    /// <returns>The AI's response content, or null on error</returns>
    public static Task<string?> GetChatCompletionAsync(string systemPrompt, string userMessage, string? model = null, int maxTokens = 300)
    {
        var messages = new List<ConversationMessage>
        {
            new() { Role = "user", Content = userMessage }
        };
        return GetChatCompletionAsync(systemPrompt, messages, model, maxTokens);
    }
    /// <summary>
    /// Sends a chat completion request to Grok AI with a full conversation
    /// history.
    ///
    /// Flow:
    /// 1. Fetch API key and settings from the settings store
    /// 2. Configure HTTP client with optional proxy
    /// 3. Build payload: system prompt first, then the conversation messages
    /// 4. POST to the Grok endpoint and parse the first choice's content
    ///
    /// Error handling: returns null (and logs) when the API key is missing,
    /// the HTTP request fails, the API returns a non-success status, or the
    /// response contains no choices.
    /// </summary>
    /// <param name="systemPrompt">Instructions for the AI (personality, constraints, etc.)</param>
    /// <param name="messages">Conversation messages (system context summaries, user messages, assistant responses)</param>
    /// <param name="model">Optional model override (uses Grok.Nora.Model from settings if null)</param>
    /// <param name="maxTokens">Maximum response tokens (default 300)</param>
    /// <returns>The AI's response content, or null on error</returns>
    public static async Task<string?> GetChatCompletionAsync(string systemPrompt, List<ConversationMessage> messages, string? model = null, int maxTokens = 300)
    {
        Logger.Info("Sending chat completion request to Grok");
        var settings = await SettingsProvider.GetMultipleValuesAsync([
            BuiltIn.Keys.GrokApiKey,
            BuiltIn.Keys.GrokChatEndpoint,
            BuiltIn.Keys.GrokNoraModel,
            BuiltIn.Keys.Proxy
        ]);
        if (string.IsNullOrEmpty(settings[BuiltIn.Keys.GrokApiKey].Value))
        {
            Logger.Error("Grok API key is not set");
            return null;
        }
        var handler = new HttpClientHandler { AutomaticDecompression = DecompressionMethods.All };
        if (settings[BuiltIn.Keys.Proxy].Value != null)
        {
            handler.UseProxy = true;
            handler.Proxy = new WebProxy(settings[BuiltIn.Keys.Proxy].Value);
            Logger.Debug($"Using proxy {settings[BuiltIn.Keys.Proxy].Value}");
        }
        using var client = new HttpClient(handler);
        try
        {
            client.DefaultRequestHeaders.Authorization =
                new AuthenticationHeaderValue("Bearer", settings[BuiltIn.Keys.GrokApiKey].Value);
            var modelToUse = model ?? settings[BuiltIn.Keys.GrokNoraModel].Value ?? "grok-4-1-fast-reasoning";
            // Build the full message list: system prompt first, then conversation history
            var apiMessages = new List<object>
            {
                new { role = "system", content = systemPrompt }
            };
            apiMessages.AddRange(messages.Select(m => (object)new { role = m.Role, content = m.Content }));
            var payload = new
            {
                model = modelToUse,
                messages = apiMessages,
                temperature = 0.7,
                max_tokens = maxTokens
            };
            var json = JsonSerializer.Serialize(payload);
            var content = new StringContent(json, Encoding.UTF8, "application/json");
            var endpoint = settings[BuiltIn.Keys.GrokChatEndpoint].Value
                           ?? "https://api.x.ai/v1/chat/completions";
            var response = await client.PostAsync(endpoint, content);
            // Fix: read the body before checking the status. The previous
            // EnsureSuccessStatusCode() discarded the response body, which is
            // where xAI puts the actual error details.
            var responseBody = await response.Content.ReadAsStringAsync();
            if (!response.IsSuccessStatusCode)
            {
                Logger.Error($"Grok API returned {(int)response.StatusCode} {response.StatusCode}: {responseBody}");
                return null;
            }
            var completionResponse = JsonSerializer.Deserialize<ChatCompletionResponse>(responseBody);
            if (completionResponse?.Choices == null || completionResponse.Choices.Count == 0)
            {
                Logger.Error("No completion returned from Grok API");
                return null;
            }
            return completionResponse.Choices[0].Message.Content;
        }
        catch (Exception ex)
        {
            Logger.Error(ex, "Error while communicating with Grok API");
        }
        return null;
    }
}

View File

@@ -0,0 +1,182 @@
using System.Net;
using System.Net.Http.Headers;
using System.Text;
using System.Text.Json;
using System.Text.Json.Serialization;
using KfChatDotNetBot.Settings;
using NLog;
namespace KfChatDotNetBot.Services;
/// <summary>
/// OpenAI Moderation API integration for content filtering.
///
/// This service uses OpenAI's free Moderation API to detect potentially harmful content.
/// The moderation categories are used to filter out illegal content while allowing
/// offensive but legal content (profanity, hate speech, etc.).
///
/// API Documentation: https://platform.openai.com/docs/api-reference/moderations
/// API Endpoint: https://api.openai.com/v1/moderations
/// Cost: Free (but has rate limits)
///
/// Content Policy:
/// - BLOCK: illicit activities, self-harm instructions, CSAM
/// - ALLOW: profanity, harassment, hate speech, adult sexual content, violence
///
/// Configuration:
/// - OpenAi.ApiKey: Your OpenAI API key (required)
/// - OpenAi.Moderation.Endpoint: API endpoint (optional override)
/// - Proxy: Global proxy setting (optional)
/// </summary>
/// <summary>
/// OpenAI Moderation API integration for content filtering.
///
/// Uses OpenAI's Moderation API to detect potentially harmful content. The
/// moderation categories are used to filter out illegal content while
/// allowing offensive but legal content (profanity, hate speech, etc.).
///
/// API Documentation: https://platform.openai.com/docs/api-reference/moderations
/// API Endpoint: https://api.openai.com/v1/moderations
///
/// Content Policy:
/// - BLOCK: illicit activities, self-harm instructions, CSAM
/// - ALLOW: profanity, harassment, hate speech, adult sexual content, violence
///
/// Configuration:
/// - OpenAi.ApiKey: Your OpenAI API key (required)
/// - OpenAi.Moderation.Endpoint: API endpoint (optional override)
/// - Proxy: Global proxy setting (optional)
/// </summary>
public static class OpenAiModeration
{
    private static readonly Logger Logger = LogManager.GetCurrentClassLogger();
    /// <summary>
    /// Response wrapper from OpenAI Moderation API.
    /// Contains model info and list of moderation results.
    /// </summary>
    public class ModerationResponse
    {
        [JsonPropertyName("id")] public string Id { get; set; } = string.Empty;
        [JsonPropertyName("model")] public string Model { get; set; } = string.Empty;
        [JsonPropertyName("results")] public List<ModerationResult> Results { get; set; } = new();
    }
    public class ModerationResult
    {
        [JsonPropertyName("flagged")] public bool Flagged { get; set; }
        [JsonPropertyName("categories")] public ModerationCategories Categories { get; set; } = new();
        [JsonPropertyName("category_scores")] public Dictionary<string, double> CategoryScores { get; set; } = new();
    }
    public class ModerationCategories
    {
        [JsonPropertyName("harassment")] public bool Harassment { get; set; }
        [JsonPropertyName("harassment/threatening")] public bool HarassmentThreatening { get; set; }
        [JsonPropertyName("sexual")] public bool Sexual { get; set; }
        [JsonPropertyName("hate")] public bool Hate { get; set; }
        [JsonPropertyName("hate/threatening")] public bool HateThreatening { get; set; }
        [JsonPropertyName("illicit")] public bool Illicit { get; set; }
        [JsonPropertyName("illicit/violent")] public bool IllicitViolent { get; set; }
        [JsonPropertyName("self-harm")] public bool SelfHarm { get; set; }
        [JsonPropertyName("self-harm/intent")] public bool SelfHarmIntent { get; set; }
        [JsonPropertyName("self-harm/instructions")] public bool SelfHarmInstructions { get; set; }
        [JsonPropertyName("sexual/minors")] public bool SexualMinors { get; set; }
        [JsonPropertyName("violence")] public bool Violence { get; set; }
        [JsonPropertyName("violence/graphic")] public bool ViolenceGraphic { get; set; }
    }
    /// <summary>
    /// Determines if content is "illegal" (vs just profane/offensive).
    ///
    /// BLOCKED categories (return true): illicit, illicit/violent,
    /// self-harm/instructions, sexual/minors.
    /// ALLOWED categories (return false): harassment, hate, sexual,
    /// violence, violence/graphic, and everything else.
    ///
    /// Design rationale: the bot operates in an edgy chat environment where
    /// profanity and offensive language are common. This policy allows that
    /// culture while preventing the bot from being used to generate truly
    /// dangerous or illegal content.
    /// </summary>
    /// <param name="categories">The moderation categories from OpenAI</param>
    /// <returns>True if content should be blocked, false if it should be allowed</returns>
    public static bool IsIllegalContent(ModerationCategories categories)
    {
        return categories.Illicit ||
               categories.IllicitViolent ||
               categories.SelfHarmInstructions ||
               categories.SexualMinors;
    }
    /// <summary>
    /// Sends content to the OpenAI Moderation API for analysis.
    ///
    /// Flow:
    /// 1. Fetch API key and settings from the settings store
    /// 2. Configure HTTP client with optional proxy
    /// 3. POST the input text and parse the first moderation result
    ///
    /// Error handling: returns null (and logs) when the API key is missing,
    /// the HTTP request fails, the API returns a non-success status, or the
    /// response contains no results. Callers should treat null as a failure
    /// and block the content as a safety precaution (fail-safe behavior).
    /// </summary>
    /// <param name="input">The text to moderate</param>
    /// <returns>ModerationResult with flagged categories, or null on error</returns>
    public static async Task<ModerationResult?> ModerateContentAsync(string input)
    {
        Logger.Info("Sending moderation request to OpenAI");
        var settings = await SettingsProvider.GetMultipleValuesAsync([
            BuiltIn.Keys.OpenAiApiKey,
            BuiltIn.Keys.OpenAiModerationEndpoint,
            BuiltIn.Keys.Proxy
        ]);
        if (string.IsNullOrEmpty(settings[BuiltIn.Keys.OpenAiApiKey].Value))
        {
            Logger.Error("OpenAI API key is not set");
            return null;
        }
        var handler = new HttpClientHandler { AutomaticDecompression = DecompressionMethods.All };
        if (settings[BuiltIn.Keys.Proxy].Value != null)
        {
            handler.UseProxy = true;
            handler.Proxy = new WebProxy(settings[BuiltIn.Keys.Proxy].Value);
            Logger.Debug($"Using proxy {settings[BuiltIn.Keys.Proxy].Value}");
        }
        using var client = new HttpClient(handler);
        try
        {
            client.DefaultRequestHeaders.Authorization =
                new AuthenticationHeaderValue("Bearer", settings[BuiltIn.Keys.OpenAiApiKey].Value);
            var payload = new { input };
            var json = JsonSerializer.Serialize(payload);
            var content = new StringContent(json, Encoding.UTF8, "application/json");
            var endpoint = settings[BuiltIn.Keys.OpenAiModerationEndpoint].Value
                           ?? "https://api.openai.com/v1/moderations";
            var response = await client.PostAsync(endpoint, content);
            // Fix: read the body before checking the status. The previous
            // EnsureSuccessStatusCode() discarded the response body, which is
            // where OpenAI puts the actual error details.
            var responseBody = await response.Content.ReadAsStringAsync();
            if (!response.IsSuccessStatusCode)
            {
                Logger.Error($"OpenAI Moderation API returned {(int)response.StatusCode} {response.StatusCode}: {responseBody}");
                return null;
            }
            var moderationResponse = JsonSerializer.Deserialize<ModerationResponse>(responseBody);
            if (moderationResponse?.Results == null || moderationResponse.Results.Count == 0)
            {
                Logger.Error("No moderation results returned from OpenAI");
                return null;
            }
            return moderationResponse.Results[0];
        }
        catch (Exception ex)
        {
            Logger.Error(ex, "Error while communicating with OpenAI Moderation API");
        }
        return null;
    }
}