Compare commits


3 Commits

6 changed files with 776 additions and 37 deletions

View File

@@ -6,9 +6,10 @@ from fastapi.middleware.cors import CORSMiddleware
 from contextlib import asynccontextmanager
 from typing import Dict, Any
 import logging
+import torch
 from .models import (
-    TextRequest, TextListRequest, QARequest, FillMaskRequest,
+    TextRequest, TextListRequest, QARequest, FillMaskRequest, TextGenRequest,
     SentimentResponse, NERResponse, QAResponse, FillMaskResponse,
     ModerationResponse, TextGenResponse, BatchResponse
 )
@@ -278,20 +279,40 @@ async def moderate_content(request: TextRequest):
 @app.post("/textgen", response_model=TextGenResponse)
-async def generate_text(request: TextRequest):
-    """Generate text from a prompt"""
+async def generate_text(request: TextGenRequest):
+    """Generate text from a prompt with configurable parameters"""
     try:
         if "textgen" not in pipelines:
             raise HTTPException(status_code=503, detail="Text generation pipeline not available")
 
         logging.info(f"Generating text for prompt: {request.text[:50]}...")
 
+        # Extract generation parameters
+        gen_params = {
+            "system_prompt": request.system_prompt,
+            "max_new_tokens": request.max_new_tokens,
+            "num_return_sequences": request.num_return_sequences,
+            "temperature": request.temperature,
+            "do_sample": request.do_sample
+        }
+
         if request.model_name:
-            from src.pipelines.textgen import TextGenerator
-            textgen = TextGenerator(request.model_name)
-            result = textgen.generate(request.text)
+            try:
+                from src.pipelines.textgen import TextGenerator
+                textgen = TextGenerator(request.model_name)
+                result = textgen.generate(request.text, **gen_params)
+            except torch.cuda.OutOfMemoryError:
+                logging.warning(f"CUDA OOM with model {request.model_name}, trying with default model on CPU")
+                result = pipelines["textgen"].generate(request.text, **gen_params)
+            except Exception as model_error:
+                logging.error(f"Error with custom model {request.model_name}: {model_error}")
+                return TextGenResponse(
+                    success=False,
+                    prompt=request.text,
+                    message=f"Erreur avec le modèle {request.model_name}: {str(model_error)}. Essayez avec le modèle par défaut."
+                )
         else:
-            result = pipelines["textgen"].generate(request.text)
+            result = pipelines["textgen"].generate(request.text, **gen_params)
 
         logging.info(f"Generation result keys: {list(result.keys())}")
@@ -311,7 +332,9 @@ async def generate_text(request: TextRequest):
         return TextGenResponse(
             success=True,
             prompt=result["prompt"],
-            generated_text=result["prompt"] + " " + generated_text
+            generated_text=generated_text,
+            parameters=result.get("parameters", {}),
+            generations=result.get("generations", [])
         )
     except Exception as e:
         logging.error(f"TextGen endpoint error: {str(e)}", exc_info=True)
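For reviewers who want to exercise the new endpoint contract, a minimal client sketch, assuming the API is served at http://localhost:8000 (the default in script.js) and that the requests package is installed; the payload fields mirror TextGenRequest from this changeset:

import requests

# Smoke-test payload using the fields introduced by this changeset
payload = {
    "text": "Once upon a time, in a distant kingdom,",
    "system_prompt": "You are a concise storyteller.",  # optional
    "max_new_tokens": 120,
    "num_return_sequences": 2,
    "temperature": 0.8,
    "do_sample": True,
}

resp = requests.post("http://localhost:8000/textgen", json=payload, timeout=600)
resp.raise_for_status()
body = resp.json()
print(body["generated_text"])
for gen in body.get("generations", []):  # one entry per returned sequence
    print("---")
    print(gen.get("continuation", ""))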

View File

@@ -12,6 +12,17 @@ class TextRequest(BaseModel):
     model_name: Optional[str] = None
 
+class TextGenRequest(BaseModel):
+    """Request model for text generation with configuration parameters"""
+    text: str
+    system_prompt: Optional[str] = None
+    model_name: Optional[str] = None
+    max_new_tokens: int = 500
+    num_return_sequences: int = 1
+    temperature: float = 1.0
+    do_sample: bool = True
+
 class TextListRequest(BaseModel):
     """Request model for multiple texts"""
     texts: List[str]
@@ -76,6 +87,8 @@ class TextGenResponse(BaseResponse):
     """Response model for Text Generation"""
     prompt: str
     generated_text: Optional[str] = None
+    parameters: Optional[Dict[str, Any]] = None
+    generations: Optional[List[Dict[str, Any]]] = None
 
 class BatchResponse(BaseResponse):
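Since the new fields are plain annotations with defaults and carry no ge/le constraints, range checking stays in the frontend. A standalone sketch of the defaults, duplicating the model above so it runs on its own (on pydantic v2, use model_dump() instead of dict()):

from typing import Optional
from pydantic import BaseModel

class TextGenRequest(BaseModel):
    """Mirror of the request model above, repeated so this snippet is self-contained"""
    text: str
    system_prompt: Optional[str] = None
    model_name: Optional[str] = None
    max_new_tokens: int = 500
    num_return_sequences: int = 1
    temperature: float = 1.0
    do_sample: bool = True

req = TextGenRequest(text="Write a haiku about autumn.", temperature=0.7)
print(req.max_new_tokens)           # 500 -- the default applies
print(req.dict(exclude_none=True))  # unset optional fields are dropped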

View File

@@ -1,5 +1,7 @@
 from click import prompt
 from transformers import pipeline
 from typing import Dict, List, Optional
+import torch
+
 from src.config import Config
@@ -16,27 +18,55 @@ class TextGenerator:
         self.model_name = model_name or Config.get_model("textgen")
         print(f"Loading text generation model: {self.model_name}")
 
-        # Initialize pipeline with proper device configuration
-        self.pipeline = pipeline(
-            "text-generation",
-            model=self.model_name,
-            device=0 if Config.USE_GPU else -1,
-            torch_dtype="auto"
-        )
+        # Clear GPU cache before loading new model
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
+
+        # Try GPU first, fallback to CPU if CUDA OOM
+        try:
+            # Initialize pipeline with proper device configuration
+            self.pipeline = pipeline(
+                "text-generation",
+                model=self.model_name,
+                device=0 if Config.USE_GPU else -1,
+                torch_dtype="auto"
+            )
+            print(f"Model loaded successfully on {'GPU' if Config.USE_GPU else 'CPU'}!")
+        except torch.cuda.OutOfMemoryError:
+            print("⚠️ GPU out of memory, falling back to CPU...")
+            # Force CPU usage
+            self.pipeline = pipeline(
+                "text-generation",
+                model=self.model_name,
+                device=-1,  # CPU
+                torch_dtype="auto"
+            )
+            print("Model loaded successfully on CPU!")
+        except Exception as e:
+            print(f"⚠️ Error loading model on GPU, trying CPU: {e}")
+            # Fallback to CPU
+            self.pipeline = pipeline(
+                "text-generation",
+                model=self.model_name,
+                device=-1,  # CPU
+                torch_dtype="auto"
+            )
+            print("Model loaded successfully on CPU!")
 
         # Set pad token if not available
         if self.pipeline.tokenizer.pad_token is None:
             self.pipeline.tokenizer.pad_token = self.pipeline.tokenizer.eos_token
 
-        print("Model loaded successfully!")
-
-    def generate(self, prompt: str, max_new_tokens: int = 100, num_return_sequences: int = 1,
-                 temperature: float = 1.0, do_sample: bool = True) -> Dict:
+    def generate(self, prompt: str, system_prompt: Optional[str] = None, max_new_tokens: int = 500,
+                 num_return_sequences: int = 1, temperature: float = 1.0, do_sample: bool = True) -> Dict:
         """
         Generate text from a prompt
 
         Args:
             prompt: Input text prompt
+            system_prompt: Optional system prompt to set context/role
             max_new_tokens: Maximum number of new tokens to generate
             num_return_sequences: Number of sequences to generate
             temperature: Sampling temperature (higher = more random)
@@ -48,9 +78,14 @@ class TextGenerator:
         if not prompt.strip():
             return {"error": "Empty prompt"}
 
+        if system_prompt:
+            full_prompt = f"{system_prompt.strip()}\n\n{prompt.strip()}\n\n"
+        else:
+            full_prompt = f"{prompt.strip()}\n\n"
+
         try:
             results = self.pipeline(
-                prompt,
+                full_prompt,
                 max_new_tokens=max_new_tokens,
                 num_return_sequences=num_return_sequences,
                 temperature=temperature,
@@ -58,17 +93,19 @@
                 pad_token_id=self.pipeline.tokenizer.eos_token_id,
                 return_full_text=True
             )
 
             generations = [
                 {
                     "text": result["generated_text"],
-                    "continuation": result["generated_text"][len(prompt):].strip()
+                    "continuation": result["generated_text"][len(full_prompt):].strip()
                 }
                 for result in results
             ]
 
             return {
                 "prompt": prompt,
+                "system_prompt": system_prompt,
+                "full_prompt": full_prompt,
                 "parameters": {
                     "max_new_tokens": max_new_tokens,
                     "num_sequences": num_return_sequences,
@@ -77,7 +114,7 @@
                 },
                 "generations": generations
             }
 
         except Exception as e:
             return {"error": f"Generation error: {str(e)}"}

View File

@@ -279,9 +279,16 @@
 <div class="content-card">
     <div class="card-header">
         <h2>✍️ Génération de Texte</h2>
-        <p>Générez du texte créatif à partir d'un prompt</p>
+        <p>Générez du texte créatif à partir d'un prompt avec des paramètres configurables</p>
     </div>
     <form class="form" onsubmit="generateText(event)">
+        <div class="form-group">
+            <label for="textgenSystemPrompt">Prompt System (optionnel)</label>
+            <textarea id="textgenSystemPrompt" rows="2"
+                placeholder="You are a helpful assistant that..."></textarea>
+            <small>Définit le rôle ou le contexte du modèle (optionnel)</small>
+        </div>
         <div class="form-group">
             <label for="textgenPrompt">Prompt</label>
             <textarea id="textgenPrompt" rows="4" placeholder="Enter your prompt..."
@@ -290,11 +297,60 @@
         <div class="form-group">
             <label for="textgenModel">Modèle (optionnel)</label>
             <select id="textgenModel">
-                <option value="">Modèle par défaut</option>
-                <option value="gpt2">GPT-2</option>
-                <option value="gpt2-medium">GPT-2 Medium</option>
+                <option value="">Modèle par défaut (GPT-2)</option>
+                <option value="distilgpt2">DistilGPT-2 (Très rapide, moins précis)</option>
+                <option value="gpt2">GPT-2 (Rapide)</option>
+                <option value="gpt2-medium">GPT-2 Medium (Lent, nécessite plus de mémoire)</option>
+                <option value="gpt2-large">GPT-2 Large (Très lent, beaucoup de mémoire)</option>
+                <option value="gpt2-xl">GPT-2 XL (Extrêmement lent, mémoire élevée)</option>
+                <option value="meta-llama/Llama-3.2-3B-Instruct">Meta Llama 3.2 3B Instruct</option>
+                <option value="meta-llama/Meta-Llama-3.1-8B-Instruct">Meta Llama 3.1 8B Instruct</option>
+                <option value="mistralai/Mistral-7B-v0.1">Mistral 7B v0.1</option>
+                <option value="sanchit-gandhi/Mistral-3B-Instruct-v0.2">Mistral 3B Instruct v0.2</option>
+                <option value="Qwen/Qwen3-0.6B">Qwen 3 0.6B</option>
+                <option value="Qwen/Qwen2.5-VL-3B-Instruct">Qwen 2.5 VL 3B Instruct</option>
+                <option value="Qwen/Qwen3-1.7B">Qwen 3 1.7B</option>
             </select>
+            <small>⚠️ Les modèles plus gros peuvent nécessiter plus de mémoire GPU</small>
         </div>
+
+        <!-- Configuration Parameters -->
+        <div class="form-group">
+            <label for="maxNewTokens">Nombre maximum de nouveaux tokens</label>
+            <input type="number" id="maxNewTokens" value="500" min="1" max="2048"
+                placeholder="500">
+            <small>Contrôle la longueur du texte généré (1-2048)</small>
+        </div>
+        <div class="form-group">
+            <label for="numReturnSequences">Nombre de séquences à générer</label>
+            <input type="number" id="numReturnSequences" value="1" min="1" max="5"
+                placeholder="1">
+            <small>Nombre de textes différents à générer (1-5)</small>
+        </div>
+        <div class="form-group">
+            <label for="temperature">Température</label>
+            <input type="number" id="temperature" value="1.0" min="0.1" max="2.0" step="0.1"
+                placeholder="1.0">
+            <small>Contrôle la créativité : plus bas = plus prévisible, plus haut = plus créatif (0.1-2.0)</small>
+        </div>
+        <div class="form-group">
+            <label class="checkbox-label">
+                <input type="checkbox" id="doSample" checked>
+                <span class="checkbox-custom"></span>
+                Utiliser l'échantillonnage
+            </label>
+            <small>Active l'échantillonnage aléatoire pour plus de créativité</small>
+        </div>
+
         <div class="form-actions">
             <button type="button" class="btn btn-secondary" onclick="loadExample('textgen')">
                 <span class="btn-icon">💡</span>
@@ -352,6 +408,8 @@
     </main>
 </div>
 
+<!-- Bibliothèque Marked.js pour le rendu Markdown -->
+<script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
 <script src="script.js"></script>
 </body>

View File

@@ -4,7 +4,8 @@ let apiUrl = "http://localhost:8000";
 // État de l'application
 const appState = {
     currentTab: "sentiment",
-    apiConnected: false
+    apiConnected: false,
+    textgenResults: {} // Pour stocker les textes originaux des résultats textgen
 };
 
 // Initialisation de l'application
@@ -46,6 +47,125 @@ function setupFormHandlers() {
     });
 }
 
+// Fonctions de gestion du Markdown
+function renderMarkdown(text) {
+    try {
+        // Configuration de marked pour la sécurité et les fonctionnalités
+        if (typeof marked !== "undefined") {
+            marked.setOptions({
+                breaks: true, // Convertir les retours à la ligne en <br>
+                gfm: true, // GitHub Flavored Markdown
+                sanitize: false, // Permet le HTML (attention à la sécurité)
+                smartLists: true,
+                smartypants: true
+            });
+            return marked.parse(text);
+        } else {
+            console.warn("Marked.js non disponible, affichage du texte brut");
+            return escapeHtml(text).replace(/\n/g, "<br>");
+        }
+    } catch (error) {
+        console.error("Erreur lors du rendu Markdown:", error);
+        return escapeHtml(text).replace(/\n/g, "<br>");
+    }
+}
+
+function escapeHtml(text) {
+    const div = document.createElement("div");
+    div.textContent = text;
+    return div.innerHTML;
+}
+
+function toggleMarkdownView(containerId, text) {
+    const container = document.getElementById(containerId);
+    // Chercher dans le container principal ou dans un élément de génération
+    const textElement = container.querySelector(".text-content") || container.querySelector(".generation-text");
+    const toggleButton = container.querySelector(".markdown-toggle");
+
+    if (!textElement || !toggleButton) {
+        console.warn("Éléments non trouvés pour le toggle Markdown:", containerId);
+        return;
+    }
+
+    const isMarkdownView = toggleButton.dataset.view === "markdown";
+    if (isMarkdownView) {
+        // Passer en vue texte brut
+        textElement.innerHTML = escapeHtml(text).replace(/\n/g, "<br>");
+        textElement.classList.remove("markdown-content");
+        textElement.classList.add("raw-text");
+        toggleButton.innerHTML = "📝 Vue Markdown";
+        toggleButton.dataset.view = "raw";
+    } else {
+        // Passer en vue Markdown
+        textElement.innerHTML = renderMarkdown(text);
+        textElement.classList.remove("raw-text");
+        textElement.classList.add("markdown-content");
+        toggleButton.innerHTML = "📄 Texte brut";
+        toggleButton.dataset.view = "markdown";
+    }
+}
+
+// Nouvelle fonction pour gérer les toggles via data-attributes
+function handleMarkdownToggle(button) {
+    const resultId = button.dataset.resultId;
+    const isMain = button.dataset.isMain === "true";
+    const genIndex = button.dataset.genIndex;
+
+    if (!resultId || !appState.textgenResults[resultId]) {
+        console.warn("Données du résultat textgen non trouvées");
+        return;
+    }
+
+    // Récupérer le texte original
+    let originalText;
+    if (isMain) {
+        originalText = appState.textgenResults[resultId].mainText;
+    } else if (genIndex !== undefined) {
+        originalText = appState.textgenResults[resultId].generations[parseInt(genIndex)];
+    } else {
+        console.warn("Type de texte non identifié");
+        return;
+    }
+
+    // Trouver le container et l'élément de texte
+    const container = button.closest(".textgen-output, .generation-item");
+    if (!container) {
+        console.warn("Container non trouvé");
+        return;
+    }
+
+    const textElement = container.querySelector(".text-content, .generation-text");
+    if (!textElement) {
+        console.warn("Élément de texte non trouvé");
+        return;
+    }
+
+    const isMarkdownView = button.dataset.view === "markdown";
+    if (isMarkdownView) {
+        // Passer en vue texte brut
+        textElement.innerHTML = escapeHtml(originalText).replace(/\n/g, "<br>");
+        textElement.classList.remove("markdown-content");
+        textElement.classList.add("raw-text");
+        button.innerHTML = "📝 Vue Markdown";
+        button.dataset.view = "raw";
+    } else {
+        // Passer en vue Markdown
+        textElement.innerHTML = renderMarkdown(originalText);
+        textElement.classList.remove("raw-text");
+        textElement.classList.add("markdown-content");
+        button.innerHTML = "📄 Texte brut";
+        button.dataset.view = "markdown";
+    }
+}
+
+function decodeHtml(html) {
+    const txt = document.createElement("textarea");
+    txt.innerHTML = html;
+    return txt.value;
+}
+
 // Navigation
 function switchTab(tabName) {
     // Mise à jour de l'état
@@ -305,6 +425,9 @@ function showResult(containerId, data, isError = false) {
     `;
     container.classList.add("show");
 
+    // Ajouter les event listeners pour les boutons markdown-toggle
+    setupMarkdownToggleListeners(container);
+
     // Animation d'entrée
     const resultCard = container.querySelector(".result-card");
     resultCard.style.transform = "translateY(20px)";
@@ -315,6 +438,36 @@
     }, 10);
 }
 
+// Fonction pour configurer les event listeners des boutons markdown
+function setupMarkdownToggleListeners(container) {
+    // Event listeners pour les boutons markdown toggle
+    const toggleButtons = container.querySelectorAll(".markdown-toggle");
+    toggleButtons.forEach((button) => {
+        button.removeEventListener("click", handleMarkdownToggleClick);
+        button.addEventListener("click", handleMarkdownToggleClick);
+    });
+
+    // Event listeners pour les boutons de copie
+    const copyButtons = container.querySelectorAll(".copy-text-btn");
+    copyButtons.forEach((button) => {
+        button.removeEventListener("click", handleCopyTextClick);
+        button.addEventListener("click", handleCopyTextClick);
+    });
+}
+
+// Event handler pour les clics sur les boutons markdown toggle
+function handleMarkdownToggleClick(event) {
+    event.preventDefault();
+    handleMarkdownToggle(event.target);
+}
+
+// Event handler pour les clics sur les boutons de copie
+function handleCopyTextClick(event) {
+    event.preventDefault();
+    const textToCopy = decodeHtml(event.target.dataset.text);
+    copyToClipboard(textToCopy);
+}
+
 function formatErrorResult(error) {
     let errorMessage = "Une erreur inattendue s'est produite";
     let suggestions = [];
@@ -330,6 +483,14 @@ function formatErrorResult(error) {
         suggestions.push("Assurez-vous que tous les champs obligatoires sont remplis");
     } else if (errorMessage.includes("MASK")) {
         suggestions.push("Utilisez [MASK] dans votre texte pour le fill-mask");
+    } else if (errorMessage.includes("CUDA out of memory") || errorMessage.includes("mémoire")) {
+        suggestions.push("🔧 Essayez un modèle plus petit (GPT-2 au lieu de GPT-2 Medium)");
+        suggestions.push("📉 Réduisez le nombre de tokens à générer");
+        suggestions.push("🔢 Réduisez le nombre de séquences à générer");
+        suggestions.push("💻 Le modèle utilisera automatiquement le CPU si le GPU manque de mémoire");
+    } else if (errorMessage.includes("model") || errorMessage.includes("modèle")) {
+        suggestions.push("Essayez avec le modèle par défaut");
+        suggestions.push("Vérifiez que le nom du modèle est correct");
     }
 }
@@ -634,19 +795,117 @@ function formatModerationResult(data) {
 function formatTextgenResult(data) {
     const generatedText = data.generated_text || "Aucun texte généré";
     const prompt = data.prompt || "";
+    const systemPrompt = data.system_prompt || "";
+    const parameters = data.parameters || {};
+    const generations = data.generations || [];
+
+    // Créer un ID unique pour ce résultat
+    const resultId = "textgen_result_" + Date.now() + "_" + Math.random().toString(36).substr(2, 9);
+
+    // Stocker les données dans l'état global
+    appState.textgenResults[resultId] = {
+        mainText: generatedText,
+        generations: generations.map((gen) => gen.text || gen.continuation || "")
+    };
+
+    // Formatage du system prompt s'il existe
+    const systemPromptHtml = systemPrompt
+        ? `
+        <div class="textgen-system-prompt">
+            <h4>🤖 Prompt System:</h4>
+            <div class="system-prompt-text">${systemPrompt}</div>
+        </div>
+    `
+        : "";
+
+    // Formatage des paramètres utilisés
+    const parametersHtml =
+        Object.keys(parameters).length > 0
+            ? `
+        <div class="textgen-parameters">
+            <h4> Paramètres utilisés:</h4>
+            <div class="parameters-grid">
+                <div class="param-item">
+                    <span class="param-label">Tokens max:</span>
+                    <span class="param-value">${parameters.max_new_tokens || "N/A"}</span>
+                </div>
+                <div class="param-item">
+                    <span class="param-label">Séquences:</span>
+                    <span class="param-value">${parameters.num_sequences || "N/A"}</span>
+                </div>
+                <div class="param-item">
+                    <span class="param-label">Température:</span>
+                    <span class="param-value">${parameters.temperature || "N/A"}</span>
+                </div>
+                <div class="param-item">
+                    <span class="param-label">Échantillonnage:</span>
+                    <span class="param-value">${parameters.do_sample ? "✅" : "❌"}</span>
+                </div>
+            </div>
+        </div>
+    `
+            : "";
+
+    // Formatage des générations multiples
+    const generationsHtml =
+        generations.length > 1
+            ? `
+        <div class="textgen-generations">
+            <h4>🎯 Générations alternatives (${generations.length}):</h4>
+            <div class="generations-container">
+                ${generations
+                    .map((gen, index) => {
+                        const genText = gen.text || gen.continuation || "Aucun texte";
+                        const genId = `${resultId}_generation_${index}`;
+                        return `
+                        <div class="generation-item" data-index="${index}" id="${genId}">
+                            <div class="generation-header">
+                                <span class="generation-number">Génération ${index + 1}</span>
+                                <button class="btn-minimal markdown-toggle"
+                                    data-view="markdown"
+                                    data-result-id="${resultId}"
+                                    data-gen-index="${index}">
+                                    📄 Texte brut
+                                </button>
+                                <button class="btn-minimal copy-text-btn"
+                                    data-text="${escapeHtml(genText)}">
+                                    📋 Copier
+                                </button>
+                            </div>
+                            <div class="generation-text markdown-content">${renderMarkdown(genText)}</div>
+                        </div>
+                        `;
+                    })
+                    .join("")}
+            </div>
+        </div>
+    `
+            : "";
 
     return `
         <div class="textgen-result">
+            ${systemPromptHtml}
             <div class="textgen-prompt">
                 <h4>📝 Prompt initial:</h4>
                 <div class="prompt-text">${prompt}</div>
             </div>
-            <div class="textgen-output">
+            ${parametersHtml}
+            <div class="textgen-output" id="${resultId}_main">
                 <h4> Texte généré:</h4>
                 <div class="generated-text">
-                    <div class="text-content">${generatedText}</div>
+                    <div class="text-content markdown-content">${renderMarkdown(generatedText)}</div>
                     <div class="text-actions">
-                        <button class="btn-minimal" onclick="copyToClipboard('${generatedText.replace(/'/g, "\\'")}')">
+                        <button class="btn-minimal markdown-toggle"
+                            data-view="markdown"
+                            data-result-id="${resultId}"
+                            data-is-main="true">
+                            📄 Texte brut
+                        </button>
+                        <button class="btn-minimal copy-text-btn"
+                            data-text="${escapeHtml(generatedText)}">
                             📋 Copier le texte
                         </button>
                         <button class="btn-minimal" onclick="regenerateText()">
@@ -655,6 +914,9 @@ function formatTextgenResult(data) {
                 </div>
             </div>
         </div>
+        ${generationsHtml}
+
         <details class="result-details">
             <summary>🔍 Détails techniques</summary>
             <div class="result-json">${JSON.stringify(data, null, 2)}</div>
@@ -816,16 +1078,51 @@ async function generateText(event) {
     showLoading("textgenResult");
 
     const text = document.getElementById("textgenPrompt").value.trim();
+    const systemPrompt = document.getElementById("textgenSystemPrompt").value.trim();
     const model = document.getElementById("textgenModel").value;
+    const maxNewTokens = parseInt(document.getElementById("maxNewTokens").value) || 500;
+    const numReturnSequences = parseInt(document.getElementById("numReturnSequences").value) || 1;
+    const temperature = parseFloat(document.getElementById("temperature").value) || 1.0;
+    const doSample = document.getElementById("doSample").checked;
 
     if (!text) {
         showResult("textgenResult", { error: "Le prompt est requis" }, true);
         return;
     }
 
+    // Validate parameters
+    if (maxNewTokens < 1 || maxNewTokens > 2048) {
+        showResult("textgenResult", { error: "Le nombre de tokens doit être entre 1 et 2048" }, true);
+        return;
+    }
+    if (numReturnSequences < 1 || numReturnSequences > 5) {
+        showResult("textgenResult", { error: "Le nombre de séquences doit être entre 1 et 5" }, true);
+        return;
+    }
+    if (temperature < 0.1 || temperature > 2.0) {
+        showResult("textgenResult", { error: "La température doit être entre 0.1 et 2.0" }, true);
+        return;
+    }
+
     try {
-        const data = { text };
-        if (model) data.model_name = model;
+        const data = {
+            text,
+            max_new_tokens: maxNewTokens,
+            num_return_sequences: numReturnSequences,
+            temperature: temperature,
+            do_sample: doSample
+        };
+
+        // Add system prompt if provided
+        if (systemPrompt) {
+            data.system_prompt = systemPrompt;
+        }
+        if (model) {
+            data.model_name = model;
+        }
+
         const result = await makeApiRequest("/textgen", data);
         showResult("textgenResult", result);
@@ -875,7 +1172,10 @@ const examples = {
     },
     fillmask: "The capital of France is [MASK].",
     moderation: "This project is fantastic! Thank you for this excellent work.",
-    textgen: "Once upon a time, in a distant kingdom,",
+    textgen: {
+        prompt: "Generate 10 ideas of what to build with a Raspberry Pi",
+        systemPrompt: "You are a helpful AI assistant. Format your responses using complex Markdown (Title, lists etc..). Be clear and structured."
+    },
     batch: `I love this product!
 This is really terrible.
 Not bad at all.
@@ -906,7 +1206,12 @@ function loadExample(type) {
             document.getElementById("moderationText").value = example;
             break;
         case "textgen":
-            document.getElementById("textgenPrompt").value = example;
+            if (typeof example === "object") {
+                document.getElementById("textgenPrompt").value = example.prompt;
+                document.getElementById("textgenSystemPrompt").value = example.systemPrompt;
+            } else {
+                document.getElementById("textgenPrompt").value = example;
+            }
             break;
         case "batch":
             document.getElementById("batchTexts").value = example;

View File

@@ -89,7 +89,7 @@ body {
 }
 
 .container {
-    max-width: 1200px;
+    max-width: 1600px;
     margin: 0 auto;
     padding: 0 var(--space-4);
     width: 100%;
@@ -570,6 +570,60 @@ select {
     cursor: pointer;
 }
 
+/* Checkbox styles */
+.checkbox-label {
+    display: flex;
+    align-items: center;
+    gap: var(--space-3);
+    cursor: pointer;
+    font-weight: 400;
+    margin-bottom: 0;
+}
+
+.checkbox-label input[type="checkbox"] {
+    display: none;
+}
+
+.checkbox-custom {
+    display: inline-block;
+    width: 20px;
+    height: 20px;
+    border: 2px solid var(--gray-300);
+    border-radius: var(--radius-sm);
+    background: white;
+    transition: var(--transition);
+    position: relative;
+}
+
+.checkbox-label input[type="checkbox"]:checked + .checkbox-custom {
+    background: var(--primary-500);
+    border-color: var(--primary-500);
+}
+
+.checkbox-label input[type="checkbox"]:checked + .checkbox-custom::after {
+    content: "✓";
+    position: absolute;
+    top: 50%;
+    left: 50%;
+    transform: translate(-50%, -50%);
+    color: white;
+    font-size: 12px;
+    font-weight: bold;
+}
+
+.checkbox-label:hover .checkbox-custom {
+    border-color: var(--primary-400);
+}
+
+/* Small text for form hints */
+small {
+    display: block;
+    margin-top: var(--space-1);
+    font-size: 0.75rem;
+    color: var(--gray-500);
+    line-height: 1.4;
+}
+
 .form-actions {
     display: flex;
     gap: var(--space-3);
@@ -1434,3 +1488,252 @@ select {
         transform: rotate(360deg);
     }
 }
+
+/* Text Generation specific styles */
+.textgen-parameters {
+    margin: var(--space-4) 0;
+    padding: var(--space-4);
+    background: var(--gray-50);
+    border-radius: var(--radius);
+    border: 1px solid var(--gray-200);
+}
+
+.parameters-grid {
+    display: grid;
+    grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
+    gap: var(--space-3);
+    margin-top: var(--space-3);
+}
+
+.param-item {
+    display: flex;
+    justify-content: space-between;
+    align-items: center;
+    padding: var(--space-2) var(--space-3);
+    background: white;
+    border-radius: var(--radius-sm);
+    border: 1px solid var(--gray-200);
+}
+
+.param-label {
+    font-weight: 500;
+    color: var(--gray-700);
+    font-size: 0.875rem;
+}
+
+.param-value {
+    font-weight: 600;
+    color: var(--primary-600);
+    font-size: 0.875rem;
+}
+
+.textgen-generations {
+    margin: var(--space-4) 0;
+}
+
+.generations-container {
+    display: flex;
+    flex-direction: column;
+    gap: var(--space-3);
+    margin-top: var(--space-3);
+}
+
+.generation-item {
+    border: 1px solid var(--gray-200);
+    border-radius: var(--radius);
+    overflow: hidden;
+}
+
+.generation-header {
+    display: flex;
+    justify-content: space-between;
+    align-items: center;
+    padding: var(--space-3);
+    background: var(--gray-50);
+    border-bottom: 1px solid var(--gray-200);
+}
+
+.generation-number {
+    font-weight: 600;
+    color: var(--gray-700);
+    font-size: 0.875rem;
+}
+
+.generation-text {
+    padding: var(--space-4);
+    background: white;
+    color: var(--gray-800);
+    line-height: 1.6;
+    white-space: pre-wrap;
+    word-wrap: break-word;
+}
+
+/* System prompt specific styles */
+.textgen-system-prompt {
+    margin: var(--space-4) 0;
+    padding: var(--space-4);
+    background: var(--primary-50);
+    border-radius: var(--radius);
+    border: 1px solid var(--primary-200);
+    border-left: 4px solid var(--primary-500);
+}
+
+.system-prompt-text {
+    padding: var(--space-3);
+    background: white;
+    border-radius: var(--radius-sm);
+    border: 1px solid var(--primary-200);
+    color: var(--gray-800);
+    line-height: 1.6;
+    font-style: italic;
+    white-space: pre-wrap;
+    word-wrap: break-word;
+}
+
+/* Markdown content styles */
+.markdown-content {
+    line-height: 1.7;
+}
+
+.markdown-content h1,
+.markdown-content h2,
+.markdown-content h3,
+.markdown-content h4,
+.markdown-content h5,
+.markdown-content h6 {
+    color: var(--gray-900);
+    font-weight: 600;
+    margin: var(--space-4) 0 var(--space-2) 0;
+}
+
+.markdown-content h1 {
+    font-size: 1.875rem;
+    border-bottom: 2px solid var(--gray-200);
+    padding-bottom: var(--space-2);
+}
+
+.markdown-content h2 {
+    font-size: 1.5rem;
+    border-bottom: 1px solid var(--gray-200);
+    padding-bottom: var(--space-1);
+}
+
+.markdown-content h3 {
+    font-size: 1.25rem;
+}
+
+.markdown-content h4 {
+    font-size: 1.125rem;
+}
+
+.markdown-content p {
+    margin: var(--space-3) 0;
+    color: var(--gray-700);
+}
+
+.markdown-content ul,
+.markdown-content ol {
+    margin: var(--space-3) 0;
+    padding-left: var(--space-6);
+}
+
+.markdown-content li {
+    margin: var(--space-1) 0;
+    color: var(--gray-700);
+}
+
+.markdown-content blockquote {
+    margin: var(--space-4) 0;
+    padding: var(--space-3) var(--space-4);
+    background: var(--gray-50);
+    border-left: 4px solid var(--primary-500);
+    border-radius: 0 var(--radius) var(--radius) 0;
+}
+
+.markdown-content blockquote p {
+    margin: 0;
+    font-style: italic;
+    color: var(--gray-600);
+}
+
+.markdown-content code {
+    background: var(--gray-100);
+    padding: 2px 6px;
+    border-radius: var(--radius-sm);
+    font-family: "Courier New", Courier, monospace;
+    font-size: 0.875rem;
+    color: var(--primary-700);
+}
+
+.markdown-content pre {
+    background: var(--gray-900);
+    color: var(--gray-100);
+    padding: var(--space-4);
+    border-radius: var(--radius);
+    overflow-x: auto;
+    margin: var(--space-4) 0;
+}
+
+.markdown-content pre code {
+    background: transparent;
+    padding: 0;
+    color: inherit;
+}
+
+.markdown-content table {
+    width: 100%;
+    border-collapse: collapse;
+    margin: var(--space-4) 0;
+}
+
+.markdown-content th,
+.markdown-content td {
+    border: 1px solid var(--gray-300);
+    padding: var(--space-2) var(--space-3);
+    text-align: left;
+}
+
+.markdown-content th {
+    background: var(--gray-50);
+    font-weight: 600;
+    color: var(--gray-900);
+}
+
+.markdown-content a {
+    color: var(--primary-600);
+    text-decoration: underline;
+}
+
+.markdown-content a:hover {
+    color: var(--primary-700);
+}
+
+.markdown-content strong {
+    font-weight: 600;
+    color: var(--gray-900);
+}
+
+.markdown-content em {
+    font-style: italic;
+}
+
+.markdown-content hr {
+    border: none;
+    height: 1px;
+    background: var(--gray-300);
+    margin: var(--space-6) 0;
+}
+
+/* Raw text styles */
+.raw-text {
+    white-space: pre-wrap;
+    word-wrap: break-word;
+    font-family: "Courier New", Courier, monospace;
+    font-size: 0.875rem;
+    line-height: 1.5;
+    color: var(--gray-700);
+    background: var(--gray-50);
+    padding: var(--space-3);
+    border-radius: var(--radius);
+    border: 1px solid var(--gray-200);
+}