This commit is contained in:
stef
2026-04-25 23:14:07 +02:00
commit 9fef8ec260
7 changed files with 1184 additions and 0 deletions

857
main.go Normal file
View File

@@ -0,0 +1,857 @@
package main
import (
"context"
"crypto/tls"
"encoding/csv"
"encoding/json"
"flag"
"fmt"
"io"
"log"
"math"
"net/http"
"os"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
)
// TimeSeriesMetric holds the counters and latency statistics for one
// sampling window of the stress test.
//
// NOTE(review): the JSON tags advertise "_ms" suffixes, but time.Duration
// marshals as an integer nanosecond count with encoding/json — exported
// values are nanoseconds, not milliseconds. Confirm downstream consumers.
type TimeSeriesMetric struct {
	Timestamp  time.Time     `json:"timestamp"`      // window start time
	Requests   int64         `json:"requests"`       // total requests observed in the window
	Success    int64         `json:"success"`        // requests with err == nil and status < 400
	Errors     int64         `json:"errors"`         // transport errors or status >= 400
	AvgLatency time.Duration `json:"avg_latency_ms"` // mean latency of successful requests
	MinLatency time.Duration `json:"min_latency_ms"` // fastest successful request (0 if none)
	MaxLatency time.Duration `json:"max_latency_ms"` // slowest successful request
	RPS        float64       `json:"rps"`            // requests per second over the window
	ErrorRate  float64       `json:"error_rate_percent"`
}
// AggregatedMetrics is the final, exportable summary of a whole test run:
// global counters, latency percentiles, a per-error-type breakdown, and the
// windowed time series.
//
// NOTE(review): the "_ms" JSON tags are misleading — time.Duration fields
// marshal as nanoseconds with encoding/json.
type AggregatedMetrics struct {
	StartTime       time.Time          `json:"start_time"`
	EndTime         time.Time          `json:"end_time"`
	TotalRequests   int64              `json:"total_requests"`
	SuccessRequests int64              `json:"success_requests"`
	ErrorRequests   int64              `json:"error_requests"`
	ErrorRate       float64            `json:"error_rate_percent"`
	AvgRPS          float64            `json:"avg_rps"`
	LatencyAvg      time.Duration      `json:"latency_avg_ms"`
	LatencyMedian   time.Duration      `json:"latency_median_ms"`
	LatencyP95      time.Duration      `json:"latency_p95_ms"`
	LatencyP99      time.Duration      `json:"latency_p99_ms"`
	LatencyMin      time.Duration      `json:"latency_min_ms"`
	LatencyMax      time.Duration      `json:"latency_max_ms"`
	ErrorsByType    map[string]int64   `json:"errors_by_type"` // error message or "HTTP_<code>" -> count
	TimeSeries      []TimeSeriesMetric `json:"time_series"`
}
// Metrics accumulates results from all workers for the whole test run.
// The three counters are updated atomically; the slices and map are guarded
// by mu.
type Metrics struct {
	totalReqs     int64              // atomic: total requests issued
	successReqs   int64              // atomic: requests with err == nil and status < 400
	errorReqs     int64              // atomic: failed requests
	latencies     []time.Duration    // guarded by mu: latencies of successful requests only
	errors        map[string]int64   // guarded by mu: error message / "HTTP_<code>" -> count
	timeSeries    []TimeSeriesMetric // guarded by mu: closed sampling windows
	currentWindow *WindowMetrics     // NOTE(review): pointer is read/replaced without holding mu — data race under concurrent workers; verify with -race
	mu            sync.RWMutex       // changed from Mutex to RWMutex
	startTime     time.Time
	endTime       time.Time
	windowSize    time.Duration // duration of one time-series window
}
// WindowMetrics accumulates counters for the currently open sampling window.
type WindowMetrics struct {
	startTime  time.Time
	requests   int64
	success    int64
	errors     int64
	latencies  []time.Duration // latencies of successful requests in this window
	minLatency time.Duration   // initialized to time.Hour as a "no sample yet" sentinel
	maxLatency time.Duration
	mu         sync.Mutex // mutex dedicated to this window
}
// Result describes the outcome of a single request.
//
// NOTE(review): this type is not referenced anywhere in this file — it may
// be dead code or used by another file in the package.
type Result struct {
	latency time.Duration
	err     error
	status  int
}
// NewMetrics builds a Metrics collector whose time series is bucketed into
// windows of the given size. The first window opens immediately.
func NewMetrics(windowSize time.Duration) *Metrics {
	firstWindow := &WindowMetrics{
		startTime:  time.Now(),
		latencies:  []time.Duration{},
		minLatency: time.Hour, // sentinel: no latency recorded yet
	}
	m := &Metrics{
		windowSize:    windowSize,
		errors:        map[string]int64{},
		timeSeries:    []TimeSeriesMetric{},
		currentWindow: firstWindow,
	}
	return m
}
// AddResult records the outcome of a single request: it bumps the global
// atomic counters, records the error type or latency, updates the currently
// open window, and closes the window once it has outlived windowSize.
//
// NOTE(review): m.currentWindow is read here without holding m.mu while
// closeCurrentWindow replaces the pointer — with concurrent workers this is
// a data race, and two goroutines can both see the window as expired and
// close it twice. Verify with -race.
func (m *Metrics) AddResult(latency time.Duration, err error, status int) {
	atomic.AddInt64(&m.totalReqs, 1)
	// Error handling: transport failures and HTTP >= 400 both count as errors.
	if err != nil || status >= 400 {
		atomic.AddInt64(&m.errorReqs, 1)
		m.mu.Lock()
		if err != nil {
			m.errors[err.Error()]++
		} else {
			m.errors[fmt.Sprintf("HTTP_%d", status)]++
		}
		m.mu.Unlock()
	} else {
		atomic.AddInt64(&m.successReqs, 1)
		m.mu.Lock()
		// Only successful requests contribute to the latency percentiles.
		m.latencies = append(m.latencies, latency)
		m.mu.Unlock()
	}
	// Update the current window under its own mutex.
	m.currentWindow.mu.Lock()
	m.currentWindow.requests++
	if err == nil && status < 400 {
		m.currentWindow.success++
		m.currentWindow.latencies = append(m.currentWindow.latencies, latency)
		if latency < m.currentWindow.minLatency {
			m.currentWindow.minLatency = latency
		}
		if latency > m.currentWindow.maxLatency {
			m.currentWindow.maxLatency = latency
		}
	} else {
		m.currentWindow.errors++
	}
	m.currentWindow.mu.Unlock()
	// Check whether the window must be closed and rolled into the series.
	now := time.Now()
	if now.Sub(m.currentWindow.startTime) >= m.windowSize {
		m.closeCurrentWindow(now)
	}
}
// closeCurrentWindow rolls the open window into the time series and opens a
// fresh one starting at now. It is called when a window exceeds windowSize
// and once more at test end via SetEndTime.
//
// NOTE(review): the method locks the window it is about to replace, then
// swaps m.currentWindow without holding m.mu — concurrent AddResult callers
// may still hold the old pointer or read a stale one. Verify with -race.
func (m *Metrics) closeCurrentWindow(now time.Time) {
	m.currentWindow.mu.Lock()
	defer m.currentWindow.mu.Unlock()
	if m.currentWindow.requests == 0 {
		// Reset the window's start even when it is empty.
		m.currentWindow.startTime = now
		return
	}
	// Mean latency over the window's successful requests.
	var avgLatency time.Duration
	if len(m.currentWindow.latencies) > 0 {
		var sum int64 = 0
		for _, lat := range m.currentWindow.latencies {
			sum += int64(lat)
		}
		avgLatency = time.Duration(sum / int64(len(m.currentWindow.latencies)))
	}
	// Actual elapsed time (minimum 1ms to avoid division by zero).
	duration := now.Sub(m.currentWindow.startTime)
	if duration <= 0 {
		duration = 1 * time.Millisecond
	}
	// Requests per second with bounds checking.
	rps := float64(m.currentWindow.requests) / duration.Seconds()
	// Reject aberrant values (NaN, Inf, negative).
	if math.IsNaN(rps) || math.IsInf(rps, 0) || rps < 0 {
		log.Printf("Warning: Invalid RPS value detected: %f, using 0", rps)
		rps = 0
	}
	// Error rate for the window.
	errorRate := 0.0
	if m.currentWindow.requests > 0 {
		errorRate = float64(m.currentWindow.errors) / float64(m.currentWindow.requests) * 100
	}
	// Handle the case where minLatency was never updated (no successes):
	// leave 0 rather than the time.Hour sentinel.
	minLatency := m.currentWindow.minLatency
	if minLatency == time.Hour {
		minLatency = 0
	}
	metric := TimeSeriesMetric{
		Timestamp:  m.currentWindow.startTime,
		Requests:   m.currentWindow.requests,
		Success:    m.currentWindow.success,
		Errors:     m.currentWindow.errors,
		AvgLatency: avgLatency,
		MinLatency: minLatency,
		MaxLatency: m.currentWindow.maxLatency,
		RPS:        rps,
		ErrorRate:  errorRate,
	}
	m.mu.Lock()
	m.timeSeries = append(m.timeSeries, metric)
	m.mu.Unlock()
	// Open a fresh window.
	m.currentWindow = &WindowMetrics{
		startTime:  now,
		latencies:  make([]time.Duration, 0),
		minLatency: time.Hour,
		maxLatency: 0,
	}
}
// SetEndTime stamps the end of the test run and flushes the still-open
// window into the time series.
func (m *Metrics) SetEndTime() {
	now := time.Now()
	m.endTime = now
	m.closeCurrentWindow(now)
}
// GetAggregatedMetrics snapshots all collected data into an
// AggregatedMetrics summary.
//
// Fix: the original returned m.errors and m.timeSeries directly, aliasing
// the collector's internal state; the snapshot now carries copies so callers
// may retain or mutate it safely.
func (m *Metrics) GetAggregatedMetrics() *AggregatedMetrics {
	m.mu.RLock()
	defer m.mu.RUnlock()

	total := atomic.LoadInt64(&m.totalReqs)
	success := atomic.LoadInt64(&m.successReqs)
	errCount := atomic.LoadInt64(&m.errorReqs)

	duration := m.endTime.Sub(m.startTime)
	if duration <= 0 {
		duration = 1 * time.Millisecond // avoid division by zero below
	}

	errorRate := 0.0
	if total > 0 {
		errorRate = float64(errCount) / float64(total) * 100
	}
	avgRPS := float64(total) / duration.Seconds()
	if math.IsNaN(avgRPS) || math.IsInf(avgRPS, 0) {
		avgRPS = 0
	}

	// Percentiles are computed over successful requests only — failed
	// requests never have their latency recorded (see AddResult).
	var avgLatency, median, p95, p99, minLat, maxLat time.Duration
	if len(m.latencies) > 0 {
		sorted := make([]time.Duration, len(m.latencies))
		copy(sorted, m.latencies)
		sort.Slice(sorted, func(i, j int) bool {
			return sorted[i] < sorted[j]
		})
		// Sum in int64 to avoid Duration overflow surprises.
		var sum int64 = 0
		for _, lat := range sorted {
			sum += int64(lat)
		}
		avgLatency = time.Duration(sum / int64(len(sorted)))
		median = sorted[len(sorted)/2] // upper median for even counts
		p95 = percentile(sorted, 0.95)
		p99 = percentile(sorted, 0.99)
		minLat = sorted[0]
		maxLat = sorted[len(sorted)-1]
	}

	// Copy shared state so the snapshot does not alias internal containers.
	errsCopy := make(map[string]int64, len(m.errors))
	for k, v := range m.errors {
		errsCopy[k] = v
	}
	seriesCopy := make([]TimeSeriesMetric, len(m.timeSeries))
	copy(seriesCopy, m.timeSeries)

	return &AggregatedMetrics{
		StartTime:       m.startTime,
		EndTime:         m.endTime,
		TotalRequests:   total,
		SuccessRequests: success,
		ErrorRequests:   errCount,
		ErrorRate:       errorRate,
		AvgRPS:          avgRPS,
		LatencyAvg:      avgLatency,
		LatencyMedian:   median,
		LatencyP95:      p95,
		LatencyP99:      p99,
		LatencyMin:      minLat,
		LatencyMax:      maxLat,
		ErrorsByType:    errsCopy,
		TimeSeries:      seriesCopy,
	}
}
func percentile(data []time.Duration, p float64) time.Duration {
if len(data) == 0 {
return 0
}
idx := int(float64(len(data)) * p)
if idx >= len(data) {
idx = len(data) - 1
}
if idx < 0 {
idx = 0
}
return data[idx]
}
// Export CSV avec validation
func (m *AggregatedMetrics) ExportCSV(filename string) error {
file, err := os.Create(filename)
if err != nil {
return err
}
defer file.Close()
writer := csv.NewWriter(file)
defer writer.Flush()
// En-têtes CSV
headers := []string{
"Timestamp", "Requests", "Success", "Errors", "ErrorRate(%)",
"AvgLatency(ms)", "MinLatency(ms)", "MaxLatency(ms)", "RPS",
}
if err := writer.Write(headers); err != nil {
return err
}
// Données avec validation
for _, metric := range m.TimeSeries {
// S'assurer que RPS n'est pas négatif ou invalide
rps := metric.RPS
if math.IsNaN(rps) || math.IsInf(rps, 0) || rps < 0 {
rps = 0
}
row := []string{
metric.Timestamp.Format("2006-01-02 15:04:05"),
fmt.Sprintf("%d", metric.Requests),
fmt.Sprintf("%d", metric.Success),
fmt.Sprintf("%d", metric.Errors),
fmt.Sprintf("%.2f", math.Max(0, metric.ErrorRate)),
fmt.Sprintf("%.2f", math.Max(0, float64(metric.AvgLatency.Milliseconds()))),
fmt.Sprintf("%.2f", math.Max(0, float64(metric.MinLatency.Milliseconds()))),
fmt.Sprintf("%.2f", math.Max(0, float64(metric.MaxLatency.Milliseconds()))),
fmt.Sprintf("%.2f", rps),
}
if err := writer.Write(row); err != nil {
return err
}
}
return nil
}
// Export JSON
func (m *AggregatedMetrics) ExportJSON(filename string) error {
file, err := os.Create(filename)
if err != nil {
return err
}
defer file.Close()
encoder := json.NewEncoder(file)
encoder.SetIndent("", " ")
return encoder.Encode(m)
}
// Génération HTML avec graphique et validation
func (m *AggregatedMetrics) GenerateHTMLReport(filename string) error {
// Nettoyer les données pour HTML
cleanedData := m.cleanTimeSeriesData()
html := `<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Stress Test Report</title>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<style>
body { font-family: Arial, sans-serif; margin: 20px; background: #f5f5f5; }
.container { max-width: 1200px; margin: auto; background: white; padding: 20px; border-radius: 10px; }
h1, h2 { color: #333; }
.metrics-grid { display: grid; grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); gap: 15px; margin: 20px 0; }
.metric-card { background: #f9f9f9; padding: 15px; border-radius: 5px; border-left: 4px solid #007bff; }
.metric-value { font-size: 24px; font-weight: bold; margin: 10px 0; }
.metric-label { color: #666; font-size: 12px; text-transform: uppercase; }
canvas { margin: 20px 0; max-height: 400px; }
.error-list { max-height: 300px; overflow-y: auto; }
.error-item { padding: 5px; border-bottom: 1px solid #eee; }
</style>
</head>
<body>
<div class="container">
<h1>Stress Test Report</h1>
<p>Generated: ` + time.Now().Format("2006-01-02 15:04:05") + `</p>
<h2>Summary Metrics</h2>
<div class="metrics-grid">
<div class="metric-card">
<div class="metric-label">Total Requests</div>
<div class="metric-value">` + fmt.Sprintf("%d", m.TotalRequests) + `</div>
</div>
<div class="metric-card">
<div class="metric-label">Success Rate</div>
<div class="metric-value">` + fmt.Sprintf("%.2f", math.Max(0, 100-m.ErrorRate)) + `%</div>
</div>
<div class="metric-card">
<div class="metric-label">Avg RPS</div>
<div class="metric-value">` + fmt.Sprintf("%.2f", math.Max(0, m.AvgRPS)) + `</div>
</div>
<div class="metric-card">
<div class="metric-label">Avg Latency</div>
<div class="metric-value">` + fmt.Sprintf("%.2f", math.Max(0, float64(m.LatencyAvg.Milliseconds()))) + ` ms</div>
</div>
<div class="metric-card">
<div class="metric-label">P95 Latency</div>
<div class="metric-value">` + fmt.Sprintf("%.2f", math.Max(0, float64(m.LatencyP95.Milliseconds()))) + ` ms</div>
</div>
<div class="metric-card">
<div class="metric-label">P99 Latency</div>
<div class="metric-value">` + fmt.Sprintf("%.2f", math.Max(0, float64(m.LatencyP99.Milliseconds()))) + ` ms</div>
</div>
</div>
<h2>RPS Over Time</h2>
<canvas id="rpsChart"></canvas>
<h2>Latency Over Time</h2>
<canvas id="latencyChart"></canvas>
<h2>Error Rate Over Time</h2>
<canvas id="errorChart"></canvas>
<h2>Errors by Type</h2>
<div class="error-list">
` + m.generateErrorList() + `
</div>
</div>
<script>
const timeLabels = ` + m.generateTimeLabels(cleanedData) + `;
// RPS Chart
new Chart(document.getElementById('rpsChart'), {
type: 'line',
data: {
labels: timeLabels,
datasets: [{
label: 'Requests per Second',
data: [` + m.generateFloatArray(cleanedData, func(t TimeSeriesMetric) float64 { return math.Max(0, t.RPS) }) + `],
borderColor: 'rgb(75, 192, 192)',
backgroundColor: 'rgba(75, 192, 192, 0.1)',
tension: 0.1,
fill: true
}]
},
options: {
responsive: true,
maintainAspectRatio: true,
plugins: {
legend: { position: 'top' },
tooltip: { mode: 'index', intersect: false }
},
scales: {
y: {
beginAtZero: true,
title: { display: true, text: 'Requests per Second' }
},
x: {
title: { display: true, text: 'Time' },
ticks: {
maxRotation: 45,
minRotation: 45,
autoSkip: true,
maxTicksLimit: 15
}
}
}
}
});
// Latency Chart
new Chart(document.getElementById('latencyChart'), {
type: 'line',
data: {
labels: timeLabels,
datasets: [
{
label: 'Avg Latency (ms)',
data: [` + m.generateFloatArray(cleanedData, func(t TimeSeriesMetric) float64 { return math.Max(0, float64(t.AvgLatency.Milliseconds())) }) + `],
borderColor: 'rgb(255, 99, 132)',
backgroundColor: 'rgba(255, 99, 132, 0.1)',
tension: 0.1,
fill: true
},
{
label: 'Min Latency (ms)',
data: [` + m.generateFloatArray(cleanedData, func(t TimeSeriesMetric) float64 { return math.Max(0, float64(t.MinLatency.Milliseconds())) }) + `],
borderColor: 'rgb(54, 162, 235)',
backgroundColor: 'rgba(54, 162, 235, 0.1)',
tension: 0.1,
fill: true
},
{
label: 'Max Latency (ms)',
data: [` + m.generateFloatArray(cleanedData, func(t TimeSeriesMetric) float64 { return math.Max(0, float64(t.MaxLatency.Milliseconds())) }) + `],
borderColor: 'rgb(255, 206, 86)',
backgroundColor: 'rgba(255, 206, 86, 0.1)',
tension: 0.1,
fill: true
}
]
},
options: {
responsive: true,
maintainAspectRatio: true,
plugins: {
legend: { position: 'top' },
tooltip: { mode: 'index', intersect: false }
},
scales: {
y: {
beginAtZero: true,
title: { display: true, text: 'Latency (ms)' }
},
x: {
title: { display: true, text: 'Time' },
ticks: {
maxRotation: 45,
minRotation: 45,
autoSkip: true,
maxTicksLimit: 15
}
}
}
}
});
// Error Rate Chart
new Chart(document.getElementById('errorChart'), {
type: 'line',
data: {
labels: timeLabels,
datasets: [{
label: 'Error Rate (%)',
data: [` + m.generateFloatArray(cleanedData, func(t TimeSeriesMetric) float64 { return math.Max(0, math.Min(100, t.ErrorRate)) }) + `],
borderColor: 'rgb(255, 99, 132)',
backgroundColor: 'rgba(255, 99, 132, 0.1)',
tension: 0.1,
fill: true
}]
},
options: {
responsive: true,
maintainAspectRatio: true,
plugins: {
legend: { position: 'top' },
tooltip: { mode: 'index', intersect: false }
},
scales: {
y: {
beginAtZero: true,
max: 100,
title: { display: true, text: 'Error Rate (%)' }
},
x: {
title: { display: true, text: 'Time' },
ticks: {
maxRotation: 45,
minRotation: 45,
autoSkip: true,
maxTicksLimit: 15
}
}
}
}
});
</script>
</body>
</html>`
return os.WriteFile(filename, []byte(html), 0644)
}
// Nettoie les données aberrantes
func (m *AggregatedMetrics) cleanTimeSeriesData() []TimeSeriesMetric {
cleaned := make([]TimeSeriesMetric, 0, len(m.TimeSeries))
for _, metric := range m.TimeSeries {
// Filtrer les valeurs aberrantes
if metric.RPS < 0 || math.IsNaN(metric.RPS) || math.IsInf(metric.RPS, 0) {
metric.RPS = 0
}
if metric.ErrorRate < 0 || math.IsNaN(metric.ErrorRate) {
metric.ErrorRate = 0
}
if metric.ErrorRate > 100 {
metric.ErrorRate = 100
}
cleaned = append(cleaned, metric)
}
return cleaned
}
// generateTimeLabels renders the window timestamps as a JSON-style array of
// quoted "HH:MM:SS" strings for the inline chart script.
func (m *AggregatedMetrics) generateTimeLabels(cleanedData []TimeSeriesMetric) string {
	var b strings.Builder
	b.WriteByte('[')
	for i, sample := range cleanedData {
		if i > 0 {
			b.WriteByte(',')
		}
		b.WriteByte('"')
		b.WriteString(sample.Timestamp.Format("15:04:05"))
		b.WriteByte('"')
	}
	b.WriteByte(']')
	return b.String()
}
// Fonction generateFloatArray corrigée
func (m *AggregatedMetrics) generateFloatArray(cleanedData []TimeSeriesMetric, extractor func(TimeSeriesMetric) float64) string {
values := make([]string, len(cleanedData))
for i, metric := range cleanedData {
val := extractor(metric)
if math.IsNaN(val) || math.IsInf(val, 0) {
val = 0
}
values[i] = fmt.Sprintf("%.2f", val)
}
return strings.Join(values, ",")
}
// Fonction generateErrorList corrigée
func (m *AggregatedMetrics) generateErrorList() string {
if len(m.ErrorsByType) == 0 {
return "<div>Aucune erreur détectée</div>"
}
result := ""
for errType, count := range m.ErrorsByType {
result += fmt.Sprintf("<div class='error-item'><strong>%s</strong>: %d occurrences</div>", errType, count)
}
return result
}
// Worker issues sequential GET requests against a single URL and feeds each
// result into the shared Metrics collector.
type Worker struct {
	id       int             // worker index
	url      string          // target URL
	client   *http.Client    // dedicated client with pooled transport
	metrics  *Metrics        // shared result collector
	stopChan <-chan struct{} // closed to request shutdown
}
// NewWorker builds a worker with its own HTTP client: pooled keep-alive
// connections (up to 100 idle per host, 90s idle timeout) and a 30s
// per-request timeout.
//
// NOTE(review): TLS certificate verification is disabled
// (InsecureSkipVerify) — typical for a load-test tool, but worth exposing as
// a flag.
func NewWorker(id int, url string, metrics *Metrics, stopChan <-chan struct{}) *Worker {
	client := &http.Client{
		Timeout: 30 * time.Second,
		Transport: &http.Transport{
			MaxIdleConns:        100,
			MaxIdleConnsPerHost: 100,
			IdleConnTimeout:     90 * time.Second,
			TLSClientConfig:     &tls.Config{InsecureSkipVerify: true},
		},
	}
	w := &Worker{
		id:       id,
		url:      url,
		client:   client,
		metrics:  metrics,
		stopChan: stopChan,
	}
	return w
}
// Run issues GET requests in a tight loop until ctx is cancelled or the stop
// channel is closed. Each response body is fully drained and closed so the
// transport can reuse the connection.
//
// Fix: the original used `defer resp.Body.Close()` inside the loop, so every
// body stayed open until Run returned — leaking file descriptors and
// connections for the whole test. The body is now closed each iteration.
func (w *Worker) Run(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-w.stopChan:
			return
		default:
			start := time.Now()
			resp, err := w.client.Get(w.url)
			latency := time.Since(start)
			if err != nil {
				w.metrics.AddResult(latency, err, 0)
				continue
			}
			// Drain and close immediately (not deferred) so the
			// connection goes back to the pool each iteration.
			io.Copy(io.Discard, resp.Body)
			resp.Body.Close()
			w.metrics.AddResult(latency, nil, resp.StatusCode)
		}
	}
}
// StressTest owns the worker pool and the configuration for one test run.
type StressTest struct {
	url           string
	totalThreads  int           // number of concurrent workers
	duration      time.Duration // wall-clock limit (0 = no time limit)
	totalRequests int64         // request budget (0 = no budget)
	rampUp        time.Duration // time span over which workers are started
	metrics       *Metrics
	workers       []*Worker
	stopChan      chan struct{} // closed to stop all workers
	wg            sync.WaitGroup
}
// NewStressTest assembles a test run against url with the given worker
// count, duration/request limits, ramp-up span, and metrics window size.
func NewStressTest(url string, threads int, duration time.Duration, requests int64, rampUp time.Duration, windowSize time.Duration) *StressTest {
	st := &StressTest{
		url:           url,
		totalThreads:  threads,
		duration:      duration,
		totalRequests: requests,
		rampUp:        rampUp,
	}
	st.metrics = NewMetrics(windowSize)
	st.stopChan = make(chan struct{})
	return st
}
// Run launches the worker pool, optionally ramping threads up over
// st.rampUp, and stops when either the request budget (-r) is reached or the
// configured duration (-d) elapses — whichever comes first — then waits for
// all workers and stamps the end time.
//
// Fixes: shutdown is now idempotent — a sync.Once guards the stop channel so
// the two stop paths cannot both close it (the original panicked with
// "close of closed channel" when -r and -d were combined). The ramp-step
// division is also guarded against a zero thread count.
func (st *StressTest) Run() {
	fmt.Printf("Starting stress test on %s\n", st.url)
	fmt.Printf("Threads: %d, Duration: %.0fs, Ramp-up: %.0fs\n",
		st.totalThreads, st.duration.Seconds(), st.rampUp.Seconds())
	fmt.Println(stringRepeat("-", 60))

	st.metrics.startTime = time.Now()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Single idempotent stop path shared by the budget and duration triggers.
	var stopOnce sync.Once
	stop := func() {
		stopOnce.Do(func() {
			close(st.stopChan)
			cancel()
		})
	}

	// Avoid division by zero when no threads are configured.
	var rampStep time.Duration
	if st.totalThreads > 0 {
		rampStep = st.rampUp / time.Duration(st.totalThreads)
	}

	for i := 0; i < st.totalThreads; i++ {
		worker := NewWorker(i, st.url, st.metrics, st.stopChan)
		st.workers = append(st.workers, worker)
		st.wg.Add(1)
		go func(w *Worker, delay time.Duration) {
			defer st.wg.Done()
			if delay > 0 {
				time.Sleep(delay) // staggered start implements the ramp-up
			}
			w.Run(ctx)
		}(worker, time.Duration(i)*rampStep)
		if st.rampUp > 0 {
			time.Sleep(rampStep / 10)
		}
	}

	// Stop once the request budget is reached (polled every 100ms).
	if st.totalRequests > 0 {
		go func() {
			for atomic.LoadInt64(&st.metrics.totalReqs) < st.totalRequests {
				time.Sleep(100 * time.Millisecond)
			}
			stop()
		}()
	}
	// Stop after the configured wall-clock duration.
	if st.duration > 0 {
		time.Sleep(st.duration)
		stop()
	}

	st.wg.Wait()
	st.metrics.SetEndTime()
}
// stringRepeat returns s repeated count times; non-positive counts yield "".
//
// Fix: the original concatenated with += in a loop (quadratic); this
// delegates to strings.Repeat, guarding against negative counts (which make
// strings.Repeat panic, while the original loop simply produced "").
func stringRepeat(s string, count int) string {
	if count <= 0 {
		return ""
	}
	return strings.Repeat(s, count)
}
// printMetrics writes a human-readable summary of the aggregated results to
// stdout: totals, rates, latency percentiles, and the per-type error
// breakdown.
func printMetrics(metrics *AggregatedMetrics) {
	divider := stringRepeat("=", 60)
	fmt.Println("\n" + divider)
	fmt.Println("STRESS TEST RESULTS")
	fmt.Println(divider)
	fmt.Printf("Total Duration: %.2f seconds\n", metrics.EndTime.Sub(metrics.StartTime).Seconds())
	fmt.Printf("Total Requests: %d\n", metrics.TotalRequests)
	fmt.Printf("Successful Requests: %d\n", metrics.SuccessRequests)
	fmt.Printf("Failed Requests: %d\n", metrics.ErrorRequests)
	fmt.Printf("Error Rate: %.2f%%\n", metrics.ErrorRate)
	fmt.Printf("Requests per second: %.2f\n", metrics.AvgRPS)

	if metrics.LatencyAvg > 0 {
		ms := func(d time.Duration) float64 { return float64(d.Milliseconds()) }
		fmt.Println("\nLatency Metrics:")
		fmt.Printf(" Average: %.2f ms\n", ms(metrics.LatencyAvg))
		fmt.Printf(" Median: %.2f ms\n", ms(metrics.LatencyMedian))
		fmt.Printf(" P95: %.2f ms\n", ms(metrics.LatencyP95))
		fmt.Printf(" P99: %.2f ms\n", ms(metrics.LatencyP99))
		fmt.Printf(" Min: %.2f ms\n", ms(metrics.LatencyMin))
		fmt.Printf(" Max: %.2f ms\n", ms(metrics.LatencyMax))
	}

	if len(metrics.ErrorsByType) > 0 {
		fmt.Println("\nError Breakdown:")
		for errType, count := range metrics.ErrorsByType {
			fmt.Printf(" %s: %d\n", errType, count)
		}
	}
}
// main parses the CLI flags, runs the stress test, prints the summary and
// exports the CSV/JSON/HTML reports.
//
// Fix: usage errors now terminate with a non-zero exit code — the original
// returned normally, reporting success to the shell on invalid invocations.
func main() {
	var (
		url        string
		threads    int
		duration   int
		requests   int64
		rampUp     int
		windowSize int
		outputCSV  string
		outputJSON string
		outputHTML string
	)
	flag.StringVar(&url, "url", "", "Target URL to test")
	flag.IntVar(&threads, "t", 10, "Number of threads (goroutines)")
	flag.IntVar(&duration, "d", 0, "Test duration in seconds")
	flag.Int64Var(&requests, "r", 0, "Total number of requests to make")
	flag.IntVar(&rampUp, "ru", 0, "Ramp-up time in seconds")
	flag.IntVar(&windowSize, "ws", 5, "Window size for time series in seconds")
	flag.StringVar(&outputCSV, "csv", "stress_test_results.csv", "Output CSV file")
	flag.StringVar(&outputJSON, "json", "stress_test_results.json", "Output JSON file")
	flag.StringVar(&outputHTML, "html", "stress_test_report.html", "Output HTML report")
	flag.Parse()

	if url == "" {
		fmt.Println("Error: URL is required")
		flag.Usage()
		os.Exit(1) // misuse must not exit 0
	}
	if duration == 0 && requests == 0 {
		fmt.Println("Error: You must specify either -d or -r")
		os.Exit(1)
	}

	test := NewStressTest(
		url,
		threads,
		time.Duration(duration)*time.Second,
		requests,
		time.Duration(rampUp)*time.Second,
		time.Duration(windowSize)*time.Second,
	)
	test.Run()

	metrics := test.metrics.GetAggregatedMetrics()
	printMetrics(metrics)

	// Export in every configured format; a failure in one format does not
	// prevent the others.
	fmt.Println("\n" + stringRepeat("-", 60))
	fmt.Println("Exporting results...")
	if err := metrics.ExportCSV(outputCSV); err != nil {
		log.Printf("Error exporting CSV: %v\n", err)
	} else {
		fmt.Printf("✅ CSV exported to: %s\n", outputCSV)
	}
	if err := metrics.ExportJSON(outputJSON); err != nil {
		log.Printf("Error exporting JSON: %v\n", err)
	} else {
		fmt.Printf("✅ JSON exported to: %s\n", outputJSON)
	}
	if err := metrics.GenerateHTMLReport(outputHTML); err != nil {
		log.Printf("Error generating HTML report: %v\n", err)
	} else {
		fmt.Printf("✅ HTML report generated: %s\n", outputHTML)
	}
}