fix: resolve golangci-lint errors (batch 1)

- cmd/gitea-cli: fix errcheck, perfsprint, use modules/json, http constants
- models/migrations: remove unused nolint directive
- models/organization: interface{} -> any
- modules/health: rename HealthResponse -> Response to avoid stutter
- modules/idempotency: use modules/json, fix errcheck, rename IdempotencyInfo -> Info
- modules/structs: fix Verified_At naming, use omitzero

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
David H. Friedel Jr. 2026-01-09 17:46:44 -05:00
parent 9d7fab06d6
commit 81bb23f0da
8 changed files with 60 additions and 57 deletions
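
For readers unfamiliar with the lint rules named in the bullets above, the fixes reduce to a few before/after patterns. The sketch below is illustrative only — the names (`Domain`, `buildRequest`, `validate`, the env vars) are hypothetical and not taken from this commit — and it assumes Go 1.24+ for the `omitzero` JSON tag option.

```go
// Illustrative sketch of the lint-fix patterns in this commit; hypothetical names.
package main

import (
	"errors"
	"fmt"
	"net/http"
	"os"
	"time"
)

// interface{} -> any: `any` is an alias for interface{}, so this is a spelling
// change only. `omitzero` (encoding/json, Go 1.24+) omits a zero time.Time,
// which `omitempty` never does for struct values.
type Domain struct {
	Metadata   any       `json:"metadata,omitempty"`
	CertExpiry time.Time `json:"cert_expiry,omitzero"`
}

func buildRequest(url string) (*http.Request, error) {
	// HTTP constants: http.MethodPost instead of the "POST" string literal.
	return http.NewRequest(http.MethodPost, url, nil)
}

func validate(token string) error {
	if token == "" {
		// perfsprint: fmt.Errorf with no format verbs becomes errors.New.
		return errors.New("token is required")
	}
	return nil
}

func main() {
	// errcheck: either handle the returned error...
	req, err := buildRequest("https://example.invalid/api")
	if err != nil {
		fmt.Println("request error:", err)
		return
	}
	fmt.Println("method:", req.Method)

	// ...or discard it explicitly with `_ =` when ignoring it is intentional,
	// as with the cobra MarkFlagRequired calls in the diff below.
	_ = os.Setenv("GITEA_CLI_EXAMPLE", "1")

	if err := validate(os.Getenv("GITEA_CLI_TOKEN")); err != nil {
		fmt.Println(err)
	}
}
```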

View File

@@ -5,6 +5,7 @@ package cmd
import (
"bufio"
"errors"
"fmt"
"os"
"path/filepath"
@@ -47,7 +48,7 @@ var statusCmd = &cobra.Command{
func init() {
loginCmd.Flags().String("server", "", "Gitea server URL (required)")
loginCmd.Flags().String("token", "", "API token (if not provided, will prompt)")
loginCmd.MarkFlagRequired("server")
_ = loginCmd.MarkFlagRequired("server")
authCmd.AddCommand(loginCmd)
authCmd.AddCommand(logoutCmd)
@@ -70,7 +71,7 @@ func runLogin(cmd *cobra.Command, args []string) error {
} else {
// Prompt for token
fmt.Print("API Token: ")
byteToken, err := term.ReadPassword(int(syscall.Stdin))
byteToken, err := term.ReadPassword(syscall.Stdin)
if err != nil {
// Fallback if terminal not available
reader := bufio.NewReader(os.Stdin)
@@ -83,7 +84,7 @@ func runLogin(cmd *cobra.Command, args []string) error {
}
if apiToken == "" {
return fmt.Errorf("token is required")
return errors.New("token is required")
}
// Verify the token works
@@ -109,7 +110,7 @@ func runLogin(cmd *cobra.Command, args []string) error {
return fmt.Errorf("failed to marshal config: %w", err)
}
if err := os.WriteFile(configPath, data, 0600); err != nil {
if err := os.WriteFile(configPath, data, 0o600); err != nil {
return fmt.Errorf("failed to write config: %w", err)
}

View File

@@ -58,8 +58,8 @@ func init() {
rootCmd.PersistentFlags().StringVarP(&token, "token", "t", "", "API token")
rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "verbose output")
viper.BindPFlag("server", rootCmd.PersistentFlags().Lookup("server"))
viper.BindPFlag("token", rootCmd.PersistentFlags().Lookup("token"))
_ = viper.BindPFlag("server", rootCmd.PersistentFlags().Lookup("server"))
_ = viper.BindPFlag("token", rootCmd.PersistentFlags().Lookup("token"))
// Add subcommands
rootCmd.AddCommand(versionCmd)

View File

@@ -8,7 +8,7 @@ import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@@ -19,6 +19,8 @@ import (
"sync/atomic"
"time"
"code.gitea.io/gitea/modules/json"
"github.com/spf13/cobra"
)
@@ -79,19 +81,19 @@ func init() {
releaseAssetCmd.Flags().IntP("parallel", "p", 4, "Number of parallel uploads")
releaseAssetCmd.Flags().Bool("verify-checksum", true, "Verify checksum after upload")
releaseAssetCmd.Flags().Bool("progress", true, "Show progress bar")
releaseAssetCmd.MarkFlagRequired("repo")
releaseAssetCmd.MarkFlagRequired("release")
releaseAssetCmd.MarkFlagRequired("file")
_ = releaseAssetCmd.MarkFlagRequired("repo")
_ = releaseAssetCmd.MarkFlagRequired("release")
_ = releaseAssetCmd.MarkFlagRequired("file")
// resume flags
resumeCmd.Flags().String("session", "", "Upload session ID")
resumeCmd.Flags().StringP("file", "f", "", "File to upload")
resumeCmd.MarkFlagRequired("session")
resumeCmd.MarkFlagRequired("file")
_ = resumeCmd.MarkFlagRequired("session")
_ = resumeCmd.MarkFlagRequired("file")
// list flags
listCmd.Flags().StringP("repo", "r", "", "Repository (owner/repo)")
listCmd.MarkFlagRequired("repo")
_ = listCmd.MarkFlagRequired("repo")
uploadCmd.AddCommand(releaseAssetCmd)
uploadCmd.AddCommand(resumeCmd)
@@ -116,8 +118,6 @@ type ProgressTracker struct {
totalBytes int64
bytesWritten int64
startTime time.Time
lastUpdate time.Time
mu sync.Mutex
}
func (p *ProgressTracker) Add(n int64) {
@@ -138,7 +138,7 @@ func (p *ProgressTracker) Progress() (current, total int64, percent float64, spe
eta = time.Duration(float64(remaining)/speed) * time.Second
}
}
return
return current, total, percent, speed, eta
}
func runReleaseAssetUpload(cmd *cobra.Command, args []string) error {
@@ -155,13 +155,13 @@ func runReleaseAssetUpload(cmd *cobra.Command, args []string) error {
token := getToken()
if server == "" || token == "" {
return fmt.Errorf("not logged in. Use 'gitea-cli auth login' first")
return errors.New("not logged in. Use 'gitea-cli auth login' first")
}
// Parse repo
parts := strings.Split(repo, "/")
if len(parts) != 2 {
return fmt.Errorf("invalid repository format. Use owner/repo")
return errors.New("invalid repository format. Use owner/repo")
}
owner, repoName := parts[0], parts[1]
@@ -170,9 +170,7 @@ func runReleaseAssetUpload(cmd *cobra.Command, args []string) error {
if err != nil {
return fmt.Errorf("invalid chunk size: %w", err)
}
if chunkSize > maxChunkSize {
chunkSize = maxChunkSize
}
chunkSize = min(chunkSize, maxChunkSize)
// Open file
file, err := os.Open(filePath)
@@ -202,7 +200,9 @@ func runReleaseAssetUpload(cmd *cobra.Command, args []string) error {
return fmt.Errorf("failed to calculate checksum: %w", err)
}
fmt.Printf("done (%s)\n", checksum[:16]+"...")
file.Seek(0, 0) // Reset file position
if _, err := file.Seek(0, 0); err != nil {
return fmt.Errorf("failed to seek file: %w", err)
}
}
// Create upload session
@@ -259,7 +259,7 @@ func runResumeUpload(cmd *cobra.Command, args []string) error {
token := getToken()
if server == "" || token == "" {
return fmt.Errorf("not logged in")
return errors.New("not logged in")
}
// Get session status
@@ -274,7 +274,7 @@ func runResumeUpload(cmd *cobra.Command, args []string) error {
}
if session.Status == "expired" {
return fmt.Errorf("upload session has expired")
return errors.New("upload session has expired")
}
// Open file
@@ -320,12 +320,12 @@ func runListUploads(cmd *cobra.Command, args []string) error {
token := getToken()
if server == "" || token == "" {
return fmt.Errorf("not logged in")
return errors.New("not logged in")
}
parts := strings.Split(repo, "/")
if len(parts) != 2 {
return fmt.Errorf("invalid repository format")
return errors.New("invalid repository format")
}
sessions, err := listUploadSessions(server, token, parts[0], parts[1])
@@ -366,7 +366,7 @@ func uploadChunks(ctx context.Context, server, token string, session *UploadSess
var wg sync.WaitGroup
// Start workers
for i := 0; i < parallel; i++ {
for range parallel {
wg.Add(1)
go func() {
defer wg.Done()
@@ -408,7 +408,11 @@ func uploadChunks(ctx context.Context, server, token string, session *UploadSess
// Read and queue chunks
for chunkNum := session.ChunksReceived; chunkNum < totalChunks; chunkNum++ {
offset := chunkNum * chunkSize
file.Seek(offset, 0)
if _, err := file.Seek(offset, 0); err != nil {
close(jobs)
close(done)
return fmt.Errorf("failed to seek: %w", err)
}
size := chunkSize
if chunkNum == totalChunks-1 {
@@ -452,9 +456,7 @@ func uploadChunks(ctx context.Context, server, token string, session *UploadSess
func progressBar(percent float64, width int) string {
filled := int(percent / 100 * float64(width))
if filled > width {
filled = width
}
filled = min(filled, width)
return strings.Repeat("█", filled) + strings.Repeat("░", width-filled)
}
@@ -503,8 +505,8 @@ func parseSize(s string) (int64, error) {
} else if strings.HasSuffix(s, "KB") {
multiplier = 1024
s = strings.TrimSuffix(s, "KB")
} else if strings.HasSuffix(s, "B") {
s = strings.TrimSuffix(s, "B")
} else if suffix, found := strings.CutSuffix(s, "B"); found {
s = suffix
}
var value int64
@@ -539,7 +541,7 @@ func createUploadSession(server, token, owner, repo, release, fileName string, f
}
jsonBody, _ := json.Marshal(body)
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody))
req, err := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(jsonBody))
if err != nil {
return nil, err
}
@@ -569,7 +571,7 @@ func createUploadSession(server, token, owner, repo, release, fileName string, f
func getUploadSession(server, token, sessionID string) (*UploadSession, error) {
url := fmt.Sprintf("%s/api/v1/repos/uploads/%s", server, sessionID)
req, err := http.NewRequest("GET", url, nil)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, err
}
@@ -583,7 +585,7 @@ func getUploadSession(server, token, sessionID string) (*UploadSession, error) {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("session not found")
return nil, errors.New("session not found")
}
var session UploadSession
@@ -597,7 +599,7 @@ func getUploadSession(server, token, sessionID string) (*UploadSession, error) {
func uploadChunk(server, token, sessionID string, chunkNum int64, data []byte) error {
url := fmt.Sprintf("%s/api/v1/repos/uploads/%s/chunks/%d", server, sessionID, chunkNum)
req, err := http.NewRequest("PUT", url, bytes.NewReader(data))
req, err := http.NewRequest(http.MethodPut, url, bytes.NewReader(data))
if err != nil {
return err
}
@@ -628,7 +630,7 @@ type CompleteResult struct {
func completeUpload(server, token, sessionID string) (*CompleteResult, error) {
url := fmt.Sprintf("%s/api/v1/repos/uploads/%s/complete", server, sessionID)
req, err := http.NewRequest("POST", url, nil)
req, err := http.NewRequest(http.MethodPost, url, nil)
if err != nil {
return nil, err
}
@@ -657,7 +659,7 @@ func completeUpload(server, token, sessionID string) (*CompleteResult, error) {
func listUploadSessions(server, token, owner, repo string) ([]*UploadSession, error) {
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/uploads", server, owner, repo)
req, err := http.NewRequest("GET", url, nil)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, err
}
@@ -671,7 +673,7 @@ func listUploadSessions(server, token, owner, repo string) ([]*UploadSession, er
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("failed to list sessions")
return nil, errors.New("failed to list sessions")
}
var sessions []*UploadSession

View File

@@ -1,7 +1,7 @@
// Copyright 2026 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package v1_26 //nolint
package v1_26
import (
"code.gitea.io/gitea/modules/timeutil"

View File

@@ -40,7 +40,7 @@ type OrgPinnedRepo struct {
DisplayOrder int `xorm:"DEFAULT 0"`
CreatedUnix timeutil.TimeStamp `xorm:"created"`
Repo interface{} `xorm:"-"` // Will be set by caller (repo_model.Repository)
Repo any `xorm:"-"` // Will be set by caller (repo_model.Repository)
Group *OrgPinnedGroup `xorm:"-"`
}

View File

@@ -35,8 +35,8 @@ type ComponentCheck struct {
Metadata map[string]any `json:"metadata,omitempty"`
}
// HealthResponse represents the complete health check response
type HealthResponse struct {
// Response represents the complete health check response
type Response struct {
Status Status `json:"status"`
Version string `json:"version"`
Uptime time.Duration `json:"uptime_seconds"`
@@ -105,7 +105,7 @@ func (m *Manager) UnregisterChecker(name string) {
}
// Check performs all health checks
func (m *Manager) Check(ctx context.Context, includeSystem bool) *HealthResponse {
func (m *Manager) Check(ctx context.Context, includeSystem bool) *Response {
m.mu.RLock()
checkers := make(map[string]Checker)
for k, v := range m.checkers {
@@ -113,7 +113,7 @@ func (m *Manager) Check(ctx context.Context, includeSystem bool) *HealthResponse
}
m.mu.RUnlock()
response := &HealthResponse{
response := &Response{
Status: StatusHealthy,
Version: m.version,
Uptime: time.Since(m.startTime),
@@ -243,7 +243,7 @@ func (m *Manager) LivenessCheck() *ComponentCheck {
}
// ReadinessCheck performs a readiness check (can the service handle requests?)
func (m *Manager) ReadinessCheck(ctx context.Context) *HealthResponse {
func (m *Manager) ReadinessCheck(ctx context.Context) *Response {
// For readiness, we only check critical components
return m.Check(ctx, false)
}

View File

@@ -8,11 +8,11 @@ package idempotency
import (
"bytes"
"encoding/json"
"net/http"
"sync"
"time"
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/log"
)
@@ -242,7 +242,7 @@ func (m *Middleware) Handler(next http.Handler) http.Handler {
if len(key) > MaxKeyLength {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(map[string]any{
_ = json.NewEncoder(w).Encode(map[string]any{
"code": "IDEMPOTENCY_KEY_TOO_LONG",
"message": "Idempotency key exceeds maximum length of 256 characters",
})
@@ -257,7 +257,7 @@ func (m *Middleware) Handler(next http.Handler) http.Handler {
w.Header().Set(k, v)
}
w.WriteHeader(cached.StatusCode)
w.Write(cached.Body)
_, _ = w.Write(cached.Body)
return
}
@@ -267,7 +267,7 @@ func (m *Middleware) Handler(next http.Handler) http.Handler {
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Retry-After", "1")
w.WriteHeader(http.StatusConflict)
json.NewEncoder(w).Encode(map[string]any{
_ = json.NewEncoder(w).Encode(map[string]any{
"code": "IDEMPOTENCY_KEY_IN_USE",
"message": "A request with this idempotency key is currently being processed",
})
@@ -287,8 +287,8 @@ func (m *Middleware) Handler(next http.Handler) http.Handler {
})
}
// IdempotencyInfo represents information about an idempotency key
type IdempotencyInfo struct {
// Info represents information about an idempotency key
type Info struct {
Key string `json:"key"`
CreatedAt time.Time `json:"created_at"`
ExpiresAt time.Time `json:"expires_at"`
@@ -296,14 +296,14 @@ type IdempotencyInfo struct {
}
// GetInfo retrieves information about a cached idempotency key
func GetInfo(key string) (*IdempotencyInfo, bool) {
func GetInfo(key string) (*Info, bool) {
store := GetDefaultStore()
cached, ok := store.Get(key)
if !ok {
return nil, false
}
return &IdempotencyInfo{
return &Info{
Key: key,
CreatedAt: cached.CreatedAt,
ExpiresAt: cached.ExpiresAt,

View File

@@ -20,9 +20,9 @@ type PagesDomain struct {
Verified bool `json:"verified"`
VerificationToken string `json:"verification_token,omitempty"`
SSLStatus string `json:"ssl_status"`
SSLCertExpiry time.Time `json:"ssl_cert_expiry,omitempty"`
SSLCertExpiry time.Time `json:"ssl_cert_expiry,omitzero"`
Created time.Time `json:"created_at"`
Verified_At time.Time `json:"verified_at,omitempty"`
VerifiedAt time.Time `json:"verified_at,omitempty"`
}
// CreatePagesConfigOption options for creating/updating pages config