add parameters to ai response

Ayden Jahola 2025-02-25 04:37:46 +00:00
parent 0ddaf4caea
commit fed3903db8
No known key found for this signature in database
GPG key ID: 71DD90AE4AE92742
4 changed files with 119 additions and 0 deletions

71
commands/ai/chat.js Normal file

@@ -0,0 +1,71 @@
const { SlashCommandBuilder } = require("discord.js");
const { getAIResponse } = require("../../utils/aiAPI");
const QuotaUsage = require("../../models/QuotaUsage");
module.exports = {
data: new SlashCommandBuilder()
.setName("chat")
.setDescription("Chat with AI")
.addStringOption((option) =>
option
.setName("message")
.setDescription("Your message to the AI")
.setRequired(true)
),
async execute(interaction) {
await interaction.deferReply(); // Defer initial response
try {
const guildId = interaction.guild.id;
const input = interaction.options.getString("message");
const currentMonthYear = new Date().toISOString().slice(0, 7);
// Check quota first
let quotaUsage = await QuotaUsage.findOne({
guildId,
monthYear: currentMonthYear,
});
if (!quotaUsage) {
quotaUsage = new QuotaUsage({ guildId, monthYear: currentMonthYear });
await quotaUsage.save();
}
if (quotaUsage.quotaUsed >= quotaUsage.quotaLimit) {
return await interaction.editReply(
"❌ This server has reached its monthly usage limit."
);
}
// Update quota after successful check
quotaUsage.quotaUsed += 1;
await quotaUsage.save();
// Get AI response with retry logic
const model_name = "facebook/blenderbot-3B";
let aiResponse = null;
let attempts = 0;
while (!aiResponse && attempts < 3) {
attempts++;
aiResponse = await getAIResponse(model_name, input);
if (!aiResponse) {
await interaction.editReply(`⏳ AI is waking up...`);
}
}
if (aiResponse) {
await interaction.editReply(aiResponse.generated_text);
} else {
await interaction.editReply(
"❌ Failed to get response after 3 attempts. Please try again later."
);
}
} catch (error) {
console.error("Command Error:", error);
await interaction.editReply(
"⚠️ An error occurred. Please try again later."
);
}
},
};
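Note: the retry loop above fires its three attempts back-to-back, and the quota counter is bumped even if every attempt fails. A minimal sketch of spacing the attempts out, assuming a plain Promise-based sleep helper that is not part of this commit:

const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

// Inside execute(), in place of the while loop: give a cold model a few
// seconds to load between attempts instead of retrying immediately.
let aiResponse = null;
for (let attempt = 1; attempt <= 3 && !aiResponse; attempt++) {
  aiResponse = await getAIResponse(model_name, input);
  if (!aiResponse && attempt < 3) {
    await interaction.editReply(`⏳ AI is waking up... (attempt ${attempt}/3)`);
    await sleep(5000); // assumed 5-second pause between attempts
  }
}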

14
models/QuotaUsage.js Normal file

@@ -0,0 +1,14 @@
const mongoose = require("mongoose");
const quotaUsageSchema = new mongoose.Schema({
guildId: { type: String, required: true, unique: true },
monthYear: {
type: String,
default: () => new Date().toISOString().slice(0, 7),
},
quotaUsed: { type: Number, default: 0 },
quotaLimit: { type: Number, default: 100 }, // Set your desired limit
});
const QuotaUsage = mongoose.model("QuotaUsage", quotaUsageSchema);
module.exports = QuotaUsage;
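Note: guildId is declared unique on its own here, but chat.js looks up one document per guildId and monthYear pair, so the insert for a guild's second month would hit a duplicate-key error. One possible adjustment (a sketch, not part of this commit) is to make the pair unique instead:

const mongoose = require("mongoose");

const quotaUsageSchema = new mongoose.Schema({
  guildId: { type: String, required: true }, // no longer unique by itself
  monthYear: {
    type: String,
    default: () => new Date().toISOString().slice(0, 7),
  },
  quotaUsed: { type: Number, default: 0 },
  quotaLimit: { type: Number, default: 100 },
});

// One quota document per guild per month.
quotaUsageSchema.index({ guildId: 1, monthYear: 1 }, { unique: true });

module.exports = mongoose.model("QuotaUsage", quotaUsageSchema);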

1
package.json

@@ -19,6 +19,7 @@
"moment": "^2.30.1",
"mongoose": "^8.6.0",
"nodemailer": "^6.9.14",
"openai": "^4.85.4",
"owoify-js": "^2.0.0",
"puppeteer": "^23.4.1",
"rcon-client": "^4.2.5",

33
utils/aiAPI.js Normal file

@@ -0,0 +1,33 @@
const axios = require("axios");
const API_KEY = process.env.HUGGING_FACE_API_KEY;
const getAIResponse = async (model_name, input) => {
try {
const response = await axios.post(
`https://api-inference.huggingface.co/models/${model_name}`,
{ inputs: input, parameters: { max_length: 100 } },
{
headers: { Authorization: `Bearer ${API_KEY}` },
}
);
// Handle model loading state
if (response.data.error?.includes("loading")) {
console.log(`Model ${model_name} is loading, retrying...`);
return null;
}
return response.data[0] || null;
} catch (error) {
console.error("API Error:", error.message);
// Handle specific error cases
if (error.response?.data?.error) {
console.log("HF API Error:", error.response.data.error);
}
return null;
}
};
module.exports = { getAIResponse };
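
Note: the Hugging Face Inference API also accepts an options object alongside parameters; wait_for_model: true keeps the request open until a cold model has loaded, which would make the manual retry loop in chat.js unnecessary. A hedged sketch of that variant (the extra temperature parameter is an assumption and may not apply to every model):

// Sketch: same endpoint, but let the Inference API hold the request until
// the model is ready instead of returning a "loading" error.
const getAIResponseBlocking = async (model_name, input) => {
  try {
    const response = await axios.post(
      `https://api-inference.huggingface.co/models/${model_name}`,
      {
        inputs: input,
        parameters: { max_length: 100, temperature: 0.7 },
        options: { wait_for_model: true },
      },
      { headers: { Authorization: `Bearer ${API_KEY}` } }
    );
    return response.data[0] || null;
  } catch (error) {
    console.error("API Error:", error.message);
    return null;
  }
};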