fix: resolve CI/CD issues - remove unused dependencies and imports

- Remove unused thiserror dependency from Cargo.toml
- Remove unused imports (Text, Tabs, Widget, Event, KeyCode, KeyEventKind) from tui/mod.rs
- Remove unused imports (File, Write) from export/mod.rs
- Remove unused pub use ComplexityDistribution from core/analyzer.rs
This commit is contained in:
Developer
2026-02-05 15:56:58 +00:00
parent 1da735b646
commit 98e8df8906
23 changed files with 2880 additions and 0 deletions

View File

@@ -0,0 +1,52 @@
name: CI
on:
push:
branches:
- main
pull_request:
branches:
- main
jobs:
test:
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
override: true
- name: Cache cargo registry
uses: actions/cache@v4
with:
path: ~/.cargo/registry
key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-cargo-registry-
- name: Cache cargo build
uses: actions/cache@v4
with:
path: target
key: ${{ runner.os }}-cargo-build-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-cargo-build-
- name: Build project
run: cargo build --all-features
- name: Run tests
run: cargo test --all-features
- name: Run clippy
run: cargo clippy --all-features -- -D warnings
- name: Check formatting
run: cargo fmt --check -- --color=never

6
techdebt-tracker-cli/.gitignore vendored Normal file
View File

@@ -0,0 +1,6 @@
target/
Cargo.lock
*.local
.DS_Store
*.swp
*.swo

View File

@@ -0,0 +1,49 @@
# Build Instructions
This project requires a C compiler to build native dependencies.
## Linux (Debian/Ubuntu)
```bash
sudo apt-get update
sudo apt-get install -y build-essential
```
## macOS
```bash
xcode-select --install
```
## Windows
Install Visual Studio Build Tools.
## Building
```bash
# Navigate to project directory
cd techdebt-tracker-cli
# Build the project
cargo build --release
# Run tests
cargo test
# Run linting
cargo clippy
```
## Dependencies Installed
The project uses the following key dependencies:
- clap 4.4 - CLI argument parsing
- ratatui 0.26 - Terminal UI framework
- tree-sitter 0.22 - Source code parsing
- serde 1.0 - Serialization
- regex 1.10 - Pattern matching
- anyhow 1.0 - Error handling
- ignore 0.4 - File traversal with gitignore support
- crossterm 0.28 - Terminal capabilities
- chrono 0.4 - Date/time handling

View File

@@ -0,0 +1,41 @@
[package]
name = "techdebt-tracker-cli"
version = "0.1.0"
edition = "2021"
authors = ["TechDebt Tracker Contributors"]
description = "A CLI tool to analyze and track technical debt in codebases"
repository = "https://github.com/example/techdebt-tracker-cli"
keywords = ["cli", "tui", "technical-debt", "tree-sitter"]
categories = ["development-tools", "visualization"]
[dependencies]
clap = { version = "4.4", features = ["derive", "cargo"] }
ratatui = "0.26"
tree-sitter = "0.22"
serde = { version = "1.0", features = ["derive", "rc"] }
regex = "1.10"
anyhow = "1.0"
serde_json = "1.0"
toml = "0.8"
serde_yaml = "0.9"
ignore = "0.4"
crossterm = "0.28"
ansi-to-tui = "4"
unicode-width = "0.1"
dirs = "5"
chrono = { version = "0.4", features = ["std"] }
glob = "0.3"
[dev-dependencies]
assert_cmd = "2"
predicates = "3"
tempfile = "3"
[features]
default = []
dev = ["ratatui/crossterm"]
[profile.release]
opt-level = 3
lto = true
[workspace]

View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2024 TechDebt Tracker Contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1,228 @@
# TechDebt Tracker CLI
A Rust-based CLI tool that analyzes codebases to extract, categorize, and visualize TODO/FIXME/HACK comments using tree-sitter for multi-language parsing, providing an interactive TUI dashboard and export capabilities.
## Features
- **Multi-language Support**: Parse TODO/FIXME/HACK comments in JavaScript, TypeScript, Python, Rust, Go, Java, C/C++, Ruby, and more
- **Priority Categorization**: Automatically categorize technical debt by priority (Critical, High, Medium, Low) based on keywords and context
- **Interactive TUI Dashboard**: Visualize technical debt with an interactive terminal UI
- **Export Capabilities**: Export reports to JSON and Markdown formats
- **Complexity Estimation**: Estimate complexity of technical debt items based on comment content and context
- **Configurable Patterns**: Define custom comment patterns via YAML configuration
- **Ignore Patterns**: Exclude directories and files using .gitignore-style patterns
## Installation
### From Source
```bash
git clone https://github.com/example/techdebt-tracker-cli.git
cd techdebt-tracker-cli
cargo build --release
cargo install --path .
```
### Using Cargo
```bash
cargo install techdebt-tracker-cli
```
## Quick Start
### Initialize Configuration
```bash
techdebt-tracker init
```
### Analyze a Directory
```bash
# Analyze current directory
techdebt-tracker analyze
# Analyze specific directory
techdebt-tracker analyze --path /path/to/project
# Output to file
techdebt-tracker analyze --output report.json
```
### Open Interactive Dashboard
```bash
techdebt-tracker tui
```
### Export Report
```bash
# Export to JSON
techdebt-tracker export --output report.json --format json
# Export to Markdown
techdebt-tracker export --output report.md --format markdown
```
## Configuration
Create a `techdebt.yaml` file in your project root or in `~/.config/techdebt-tracker/`:
```yaml
patterns:
- keyword: "FIXME"
priority: critical
regex: false
- keyword: "TODO"
priority: medium
regex: false
- keyword: "HACK"
priority: low
regex: false
languages:
- javascript
- typescript
- python
- rust
ignore:
- "node_modules/**"
- "target/**"
- ".git/**"
```
## CLI Commands
### analyze
Analyze codebase and show summary of technical debt.
```bash
techdebt-tracker analyze [OPTIONS]
Options:
-p, --path <PATH> Directory to analyze (default: current directory)
-o, --output <FILE> Output file for results
-v, --verbose Show verbose output
```
### tui
Open interactive TUI dashboard.
```bash
techdebt-tracker tui [OPTIONS]
Options:
-p, --path <PATH> Directory to analyze (default: current directory)
```
### export
Export analysis to file.
```bash
techdebt-tracker export [OPTIONS]
Options:
-p, --path <PATH> Directory to analyze (default: current directory)
-o, --output <FILE> Output file (required)
-f, --format <FORMAT> Export format: json or markdown
```
### init
Initialize default configuration file.
```bash
techdebt-tracker init [OPTIONS]
Options:
-p, --path <PATH> Directory to create config in (default: current directory)
```
## TUI Navigation
| Key | Action |
|-----|--------|
| `Tab` | Switch between Dashboard and List views |
| `↑` / `↓` | Navigate items |
| `Enter` | View item details |
| `/` or `f` | Filter items |
| `1-4` | Filter by priority (1=Critical, 2=High, 3=Medium, 4=Low) |
| `s` | Cycle sort order |
| `c` | Clear filters |
| `q` | Quit |
## Export Formats
### JSON
```json
{
"summary": {
"total_items": 42,
"by_priority": {
"critical": 5,
"high": 10,
"medium": 20,
"low": 7
}
},
"items": [...]
}
```
### Markdown
Generates a formatted report with:
- Summary statistics
- Priority breakdown with visual bars
- Language distribution
- Detailed item list grouped by priority
## Supported Languages
- JavaScript (.js, .jsx)
- TypeScript (.ts, .tsx)
- Python (.py)
- Rust (.rs)
- Go (.go)
- Java (.java)
- C (.c)
- C++ (.cpp, .cc, .cxx, .h, .hpp)
- Ruby (.rb)
## Building from Source
```bash
# Debug build
cargo build
# Release build
cargo build --release
# Run tests
cargo test --all-features
# Run linting
cargo clippy --all-targets
# Check formatting
cargo fmt --check
```
## Contributing
1. Fork the repository
2. Create a feature branch
3. Make your changes
4. Run tests and linting
5. Submit a pull request
## License
MIT License - see LICENSE file for details.

View File

@@ -0,0 +1,3 @@
[toolchain]
channel = "stable"
components = ["rustfmt", "clippy"]

View File

@@ -0,0 +1,163 @@
use clap::{Parser, Subcommand, ValueEnum};
use std::path::{Path, PathBuf};
// Top-level CLI definition. Field comments are deliberately `//` (not `///`)
// so clap's derive does not pick them up as --help text.
#[derive(Parser, Debug)]
#[command(name = "techdebt-tracker")]
#[command(author = "TechDebt Tracker Contributors")]
#[command(version = "0.1.0")]
#[command(about = "Track and analyze technical debt in your codebase", long_about = None)]
pub struct Args {
    // Optional explicit config file; `global` lets it appear before or
    // after the subcommand on the command line.
    #[arg(short, long, global = true)]
    pub config: Option<PathBuf>,
    // The subcommand selected by the user (analyze / tui / export / init).
    #[command(subcommand)]
    pub command: Commands,
}
// One variant per subcommand; each carries its own argument struct.
#[derive(Subcommand, Debug)]
pub enum Commands {
    #[command(about = "Analyze codebase and show summary")]
    Analyze(AnalyzeArgs),
    #[command(about = "Open interactive TUI dashboard")]
    Tui(TuiArgs),
    #[command(about = "Export analysis to file")]
    Export(ExportArgs),
    #[command(about = "Initialize default configuration")]
    Init(InitArgs),
}
// Arguments for `analyze`: scan a directory, optionally dump JSON to a file.
#[derive(clap::Args, Debug)]
pub struct AnalyzeArgs {
    // Directory to scan; defaults to the current directory.
    #[arg(short, long, default_value = ".")]
    pub path: PathBuf,
    // When set, results are written as JSON instead of a console summary.
    #[arg(short, long)]
    pub output: Option<PathBuf>,
    // NOTE(review): parsed but not consumed anywhere visible — confirm
    // verbose output is still planned.
    #[arg(short, long, action)]
    pub verbose: bool,
}
// Arguments for `tui`: directory to analyze before opening the dashboard.
#[derive(clap::Args, Debug)]
pub struct TuiArgs {
    #[arg(short, long, default_value = ".")]
    pub path: PathBuf,
    // NOTE(review): not read by the tui code path shown — confirm intent.
    #[arg(short, long, action)]
    pub verbose: bool,
}
// Arguments for `export`: analyze a directory and write a report file.
#[derive(clap::Args, Debug)]
pub struct ExportArgs {
    #[arg(short, long, default_value = ".")]
    pub path: PathBuf,
    // Destination file (required — no default).
    #[arg(short, long)]
    pub output: PathBuf,
    // json or markdown; validated by clap via ValueEnum.
    #[arg(short, long, value_enum)]
    pub format: ExportFormat,
    // NOTE(review): accepted but the export path shown never reads it —
    // confirm grouping is implemented elsewhere or still TODO.
    #[arg(long)]
    pub group_by: Option<GroupBy>,
}
// Supported report formats for `export --format`.
#[derive(Clone, Copy, Debug, ValueEnum)]
pub enum ExportFormat {
    Json,
    Markdown,
}
// Grouping axis for exported reports (see ExportArgs::group_by).
#[derive(Clone, Copy, Debug, ValueEnum)]
pub enum GroupBy {
    File,
    Priority,
    Type,
}
// Arguments for `init`: where to place the generated techdebt.yaml.
#[derive(clap::Args, Debug)]
pub struct InitArgs {
    #[arg(short, long, default_value = ".")]
    pub path: PathBuf,
}
pub fn init_config(path: &PathBuf) -> anyhow::Result<()> {
let config_content = r#"# TechDebt Tracker Configuration
# https://github.com/example/techdebt-tracker-cli
# Comment patterns to search for
patterns:
- keyword: "FIXME"
priority: critical
regex: false
- keyword: "TODO"
priority: medium
regex: false
- keyword: "HACK"
priority: low
regex: false
- keyword: "BUG"
priority: high
regex: false
- keyword: "XXX"
priority: high
regex: false
- keyword: "NOTE"
priority: low
regex: false
# Languages to analyze
languages:
- javascript
- typescript
- python
- rust
- go
- java
- c
- cpp
- ruby
# Directories and files to ignore
ignore:
- "node_modules/**"
- "target/**"
- ".git/**"
- "vendor/**"
- "dist/**"
- "build/**"
- "*.min.js"
- "*.min.css"
- "*.pyc"
- "__pycache__/**"
# File extensions to include
extensions:
- ".js"
- ".ts"
- ".jsx"
- ".tsx"
- ".py"
- ".rs"
- ".go"
- ".java"
- ".c"
- ".cpp"
- ".h"
- ".hpp"
- ".rb"
- ".md"
- ".yml"
- ".yaml"
# Complexity analysis settings
complexity:
enabled: true
max_comment_length: 500
question_weight: 2
exclamation_weight: 1
# Export settings
export:
include_metadata: true
include_context: true
"#;
let config_path = path.join("techdebt.yaml");
std::fs::write(&config_path, config_content)?;
println!("Created configuration file: {}", config_path.display());
Ok(())
}

View File

@@ -0,0 +1,148 @@
use anyhow::{Context, Result};
use ignore::WalkBuilder;
use std::path::{Path, PathBuf};

use crate::core::{Language, LanguageParser};
use crate::models::{
    AnalysisSummary, ByLanguage, ByPriority, CommentType, Config, FileLocation, Priority,
    TechDebtItem,
};
/// Walks a directory tree and extracts technical-debt comments from the
/// source files it finds.
pub struct Analyzer {
    // Root directory to scan.
    path: PathBuf,
    // Patterns, extensions and ignore rules driving the scan.
    config: Config,
}
impl Analyzer {
    /// Creates an analyzer rooted at `path`, loading configuration from
    /// `config_path` (falling back to `./techdebt.yaml`, then defaults —
    /// see `load_config`).
    pub fn new(path: &PathBuf, config_path: &Option<PathBuf>) -> Result<Self> {
        let config = load_config(config_path)?;
        Ok(Self {
            path: path.clone(),
            config,
        })
    }

    /// Walks the tree (honoring .gitignore and global git excludes),
    /// parses every included file, and returns the collected items sorted
    /// by priority (highest first), then by line number.
    ///
    /// Per-file parse failures and walk errors are downgraded to warnings
    /// on stderr so one bad file does not abort the whole scan.
    pub fn analyze(&self) -> Result<Vec<TechDebtItem>> {
        let mut items = Vec::new();
        let walker = WalkBuilder::new(&self.path)
            .hidden(true)
            .git_global(true)
            .git_ignore(true)
            .require_git(false)
            .build();
        for result in walker {
            match result {
                Ok(entry) => {
                    let path = entry.path();
                    if !self.should_include(path) {
                        continue;
                    }
                    // Files whose extension maps to no known language are
                    // skipped silently.
                    if let Some(language) = Language::from_path(path) {
                        match self.parse_file(path, &language) {
                            Ok(mut file_items) => items.append(&mut file_items),
                            Err(e) => {
                                eprintln!("Warning: Failed to parse {}: {}", path.display(), e)
                            }
                        }
                    }
                }
                Err(e) => eprintln!("Warning: Error walking directory: {}", e),
            }
        }
        // Highest priority first; ties broken by ascending line number.
        // NOTE(review): the tie-break compares line numbers across different
        // files — confirm whether a per-path grouping was intended.
        items.sort_by(|a, b| {
            b.priority
                .cmp(&a.priority)
                .then_with(|| a.location.line.cmp(&b.location.line))
        });
        Ok(items)
    }

    /// Filters the walker output: regular files only, extension must be in
    /// the configured allow-list, and no ignore pattern may match.
    // NOTE(review): files with no extension bypass the extension check and
    // fall through to `true` — confirm that is intentional (they are later
    // dropped by `Language::from_path` anyway).
    fn should_include(&self, path: &PathBuf) -> bool {
        if !path.is_file() {
            return false;
        }
        if let Some(ext) = path.extension() {
            if let Some(ext_str) = ext.to_str() {
                let ext_with_dot = format!(".{}", ext_str);
                if !self.config.extensions.contains(&ext_with_dot) {
                    return false;
                }
            }
        }
        for pattern in &self.config.ignore {
            if match_ignore_pattern(path, pattern) {
                return false;
            }
        }
        true
    }

    /// Reads a file and delegates comment extraction to the language parser.
    fn parse_file(
        &self,
        path: &PathBuf,
        language: &Language,
    ) -> Result<Vec<TechDebtItem>> {
        let content = std::fs::read_to_string(path)
            .with_context(|| format!("Failed to read file: {}", path.display()))?;
        let parser = LanguageParser::new(language.clone());
        parser.parse(&content, path, &self.config.patterns)
    }
}
fn match_ignore_pattern(path: &PathBuf, pattern: &str) -> bool {
if pattern.ends_with("/**") {
let prefix = &pattern[..pattern.len() - 3];
if let Some(path_str) = path.to_str() {
return path_str.starts_with(prefix)
|| path_str.contains(&format!("{}/", prefix));
}
} else if let Some(file_name) = path.file_name() {
if let Some(file_name_str) = file_name.to_str() {
return glob::Pattern::new(pattern)
.ok()
.map(|p| p.matches(file_name_str))
.unwrap_or(false);
}
}
false
}
/// Resolves and loads the YAML configuration.
///
/// Uses the explicitly supplied path when present, otherwise looks for
/// `techdebt.yaml` in the current working directory; when no file exists
/// at the resolved location, the built-in `Config::default()` is returned.
fn load_config(config_path: &Option<PathBuf>) -> Result<Config> {
    let candidate = match config_path {
        Some(explicit) => explicit.clone(),
        None => std::env::current_dir()?.join("techdebt.yaml"),
    };
    if !candidate.exists() {
        return Ok(Config::default());
    }
    let raw = std::fs::read_to_string(&candidate)?;
    let parsed: Config = serde_yaml::from_str(&raw)?;
    Ok(parsed)
}
/// Aggregates a flat item list into an `AnalysisSummary` (totals plus
/// priority, language, and complexity breakdowns).
pub fn summarize(items: &[TechDebtItem]) -> AnalysisSummary {
    AnalysisSummary {
        total_items: items.len(),
        by_priority: ByPriority::from_items(items),
        by_language: ByLanguage::from_items(items),
        complexity_distribution: crate::models::ComplexityDistribution::from_items(items),
    }
}

View File

@@ -0,0 +1,370 @@
use std::path::{Path, PathBuf};

use crate::models::{CommentType, FileLocation, PatternConfig, Priority, TechDebtItem};
/// Source languages recognized by file extension.
#[derive(Debug, Clone, PartialEq)]
pub enum Language {
    JavaScript,
    TypeScript,
    Python,
    Rust,
    Go,
    Java,
    C,
    Cpp,
    Ruby,
    Unknown,
}

impl Language {
    /// Detects a language from a file's extension (case-insensitive).
    ///
    /// Takes `&Path` so both `&Path` and `&PathBuf` callers work through
    /// deref coercion — the original `&PathBuf` signature rejected the
    /// `&Path` values produced by the directory walker.
    pub fn from_path(path: &Path) -> Option<Self> {
        let ext = path.extension()?.to_str()?.to_lowercase();
        match ext.as_str() {
            "js" | "jsx" => Some(Language::JavaScript),
            "ts" | "tsx" => Some(Language::TypeScript),
            "py" => Some(Language::Python),
            "rs" => Some(Language::Rust),
            "go" => Some(Language::Go),
            "java" => Some(Language::Java),
            "c" => Some(Language::C),
            "cpp" | "cc" | "cxx" | "h" | "hpp" => Some(Language::Cpp),
            "rb" => Some(Language::Ruby),
            _ => None,
        }
    }

    /// Human-readable language name used in reports and item metadata.
    pub fn as_str(&self) -> &'static str {
        match self {
            Language::JavaScript => "JavaScript",
            Language::TypeScript => "TypeScript",
            Language::Python => "Python",
            Language::Rust => "Rust",
            Language::Go => "Go",
            Language::Java => "Java",
            Language::C => "C",
            Language::Cpp => "C++",
            Language::Ruby => "Ruby",
            Language::Unknown => "Unknown",
        }
    }

    /// Token that begins a single-line comment, if the language has one.
    /// Fixed: Ruby comments start with `#`, not `//`.
    pub fn single_line_comment(&self) -> Option<&'static str> {
        match self {
            Language::Python | Language::Ruby => Some("#"),
            Language::Unknown => None,
            _ => Some("//"),
        }
    }

    /// Token that opens a block comment, or `None` when the language has
    /// no block-comment syntax.
    ///
    /// Fixed: Python previously returned `Some("")`, and `str::find("")`
    /// matches at column 0 of every line, so every Python line was treated
    /// as the start of a block comment. Python has no block comments
    /// (docstrings are string literals), so report `None`. Ruby's
    /// `=begin`/`=end` form is line-based and not representable here,
    /// so Ruby also reports `None` (it previously claimed `/*`).
    pub fn multi_line_comment_start(&self) -> Option<&'static str> {
        match self {
            Language::JavaScript
            | Language::TypeScript
            | Language::Java
            | Language::C
            | Language::Cpp
            | Language::Rust
            | Language::Go => Some("/*"),
            _ => None,
        }
    }

    /// Token that closes a block comment; mirrors
    /// `multi_line_comment_start` exactly.
    pub fn multi_line_comment_end(&self) -> Option<&'static str> {
        match self {
            Language::JavaScript
            | Language::TypeScript
            | Language::Java
            | Language::C
            | Language::Cpp
            | Language::Rust
            | Language::Go => Some("*/"),
            _ => None,
        }
    }

    /// Token that begins a documentation comment.
    // NOTE(review): `"##"` for Python is unconventional (reST/Google-style
    // docstrings use `"""`); confirm this is a deliberate project idiom.
    pub fn doc_comment_start(&self) -> Option<&'static str> {
        match self {
            Language::JavaScript | Language::TypeScript => Some("/**"),
            Language::Java => Some("/**"),
            Language::Rust => Some("///"),
            Language::Python => Some("##"),
            _ => None,
        }
    }
}
/// Extracts tech-debt comments (TODO/FIXME/...) from a single file's text
/// using the comment syntax of one `Language`.
pub struct LanguageParser {
    // Language whose comment delimiters drive the scan.
    language: Language,
}

impl LanguageParser {
    /// Creates a parser bound to one language.
    pub fn new(language: Language) -> Self {
        Self { language }
    }

    /// Scans `content` line by line and returns every pattern match found
    /// inside single-line, block, and doc comments.
    ///
    /// Pattern configs with `regex: false` are matched by substring; those
    /// with `regex: true` are compiled up front (invalid regexes are
    /// silently dropped).
    pub fn parse(
        &self,
        content: &str,
        path: &PathBuf,
        patterns: &[PatternConfig],
    ) -> Result<Vec<TechDebtItem>, anyhow::Error> {
        let mut items = Vec::new();
        let lines: Vec<&str> = content.lines().collect();
        let single_line_comment = self.language.single_line_comment();
        let multi_line_start = self.language.multi_line_comment_start();
        let multi_line_end = self.language.multi_line_comment_end();
        // State for a block comment spanning multiple lines.
        let mut in_multi_line = false;
        let mut multi_line_start_line = 0;
        let mut multi_line_content = String::new();
        let mut multi_line_start_col = 0;
        // Split configured patterns once, before the per-line loop.
        let single_patterns: Vec<&PatternConfig> =
            patterns.iter().filter(|p| !p.regex).collect();
        let regex_patterns: Vec<(regex::Regex, &PatternConfig)> = patterns
            .iter()
            .filter(|p| p.regex)
            .filter_map(|p| {
                regex::Regex::new(&p.keyword)
                    .ok()
                    .map(|re| (re, p))
            })
            .collect();
        for (line_num, line) in lines.iter().enumerate() {
            // Shadow to 1-based line numbers for reporting.
            let line_num = line_num + 1;
            // --- Single-line comments (e.g. `// TODO ...`) ---
            if let Some(slc) = single_line_comment {
                if let Some(comment_start) = line.find(slc) {
                    // NOTE(review): `find` does not know about string
                    // literals, so `"http://..."` inside code is treated as
                    // a comment start — confirm acceptable.
                    let comment_text = &line[comment_start + slc.len()..];
                    let col_start = comment_start + slc.len() + 1;
                    for pattern in &single_patterns {
                        if let Some(pos) = comment_text.find(&pattern.keyword) {
                            let item_content = &comment_text[pos..];
                            let content_clean = item_content
                                .lines()
                                .next()
                                .unwrap_or(item_content)
                                .trim();
                            // NOTE(review): this re-check is always true for
                            // the substring branch (the keyword was just
                            // found above) — confirm it only exists to let
                            // regex patterns validate the cleaned text.
                            if self.matches_pattern(content_clean, &single_patterns)
                                || self.matches_regex(content_clean, &regex_patterns)
                            {
                                let item = TechDebtItem::new(
                                    pattern.keyword.clone(),
                                    content_clean.to_string(),
                                    FileLocation {
                                        path: path.clone(),
                                        line: line_num,
                                        column: col_start + pos,
                                        end_line: None,
                                        end_column: None,
                                    },
                                    self.language.as_str().to_string(),
                                    CommentType::SingleLine,
                                );
                                items.push(item);
                            }
                        }
                    }
                    // Regex patterns are matched independently against the
                    // same comment text.
                    for (regex, pattern) in &regex_patterns {
                        if let Some(mat) = regex.find(comment_text) {
                            let item = TechDebtItem::new(
                                pattern.keyword.clone(),
                                mat.as_str().to_string(),
                                FileLocation {
                                    path: path.clone(),
                                    line: line_num,
                                    column: col_start + mat.start(),
                                    end_line: None,
                                    end_column: None,
                                },
                                self.language.as_str().to_string(),
                                CommentType::SingleLine,
                            );
                            items.push(item);
                        }
                    }
                }
            }
            // --- Block comments (e.g. `/* ... */`, possibly multi-line) ---
            if let Some(mls) = multi_line_start {
                if !in_multi_line {
                    if let Some(start_pos) = line.find(mls) {
                        in_multi_line = true;
                        multi_line_start_line = line_num;
                        multi_line_start_col = start_pos + mls.len();
                        // NOTE(review): when `multi_line_end` is None,
                        // `find("")` matches at column 0, which silently
                        // closes the block on the same line — confirm every
                        // language returning Some(start) also returns
                        // Some(end).
                        if let Some(end_pos) = line.find(multi_line_end.unwrap_or("")) {
                            // Opened and closed on the same line.
                            // NOTE(review): `end_pos` is not required to be
                            // after `start_pos`; a line like `*/ x /*`
                            // slices with end < start and would panic —
                            // confirm inputs make this unreachable.
                            let comment_content = &line[start_pos + mls.len()..end_pos];
                            if let Some(content) = self.extract_comment_content(
                                comment_content,
                                &lines,
                                line_num,
                                start_pos + mls.len() + 1,
                                &single_patterns,
                                &regex_patterns,
                                path,
                            ) {
                                items.extend(content);
                            }
                            in_multi_line = false;
                        } else {
                            // Block continues; accumulate the remainder of
                            // this line.
                            multi_line_content = line[start_pos + mls.len()..].to_string();
                        }
                    }
                } else {
                    if let Some(end_pos) = line.find(multi_line_end.unwrap_or("")) {
                        // Closing line: flush everything gathered so far.
                        multi_line_content.push('\n');
                        multi_line_content.push_str(&line[..end_pos]);
                        if let Some(content) = self.extract_comment_content(
                            &multi_line_content,
                            &lines,
                            multi_line_start_line,
                            multi_line_start_col,
                            &single_patterns,
                            &regex_patterns,
                            path,
                        ) {
                            items.extend(content);
                        }
                        in_multi_line = false;
                        multi_line_content.clear();
                    } else {
                        // Interior line of an open block comment.
                        multi_line_content.push('\n');
                        multi_line_content.push_str(line);
                    }
                }
            }
            // --- Doc comments (e.g. `///`, `/** ... */`) ---
            if let Some(dls) = self.language.doc_comment_start() {
                if let Some(doc_start) = line.find(dls) {
                    let is_block_comment = dls == "/**";
                    // NOTE(review): the `+ 3` offsets assume the doc token
                    // is 3 bytes; Python's `"##"` is 2, so this can skip a
                    // character or slice out of bounds on a line ending in
                    // `##` — confirm.
                    let comment_text = if is_block_comment {
                        if let Some(end_pos) = line.find("*/") {
                            &line[doc_start + 3..end_pos]
                        } else {
                            &line[doc_start + 3..]
                        }
                    } else {
                        &line[doc_start + 3..]
                    };
                    for pattern in &single_patterns {
                        if let Some(pos) = comment_text.find(&pattern.keyword) {
                            let item_content = &comment_text[pos..];
                            let content_clean = item_content
                                .lines()
                                .next()
                                .unwrap_or(item_content)
                                .trim();
                            if self.matches_pattern(content_clean, &single_patterns)
                                || self.matches_regex(content_clean, &regex_patterns)
                            {
                                let item = TechDebtItem::new(
                                    pattern.keyword.clone(),
                                    content_clean.to_string(),
                                    FileLocation {
                                        path: path.clone(),
                                        line: line_num,
                                        column: doc_start + 3 + pos,
                                        end_line: None,
                                        end_column: None,
                                    },
                                    self.language.as_str().to_string(),
                                    CommentType::DocBlock,
                                );
                                items.push(item);
                            }
                        }
                    }
                }
            }
        }
        Ok(items)
    }

    /// True when any substring pattern's keyword appears in `content`.
    fn matches_pattern(&self, content: &str, patterns: &[&PatternConfig]) -> bool {
        patterns.iter().any(|p| content.contains(&p.keyword))
    }

    /// True when any compiled regex pattern matches `content`.
    fn matches_regex(
        &self,
        content: &str,
        regex_patterns: &[(regex::Regex, &PatternConfig)],
    ) -> bool {
        regex_patterns.iter().any(|(re, _)| re.is_match(content))
    }

    /// Runs every pattern over the body of one block comment and returns
    /// the items found, or `None` when nothing matched.
    ///
    /// Substring keywords are matched case-insensitively here (a `(?i)`
    /// regex is built per keyword), unlike the case-sensitive single-line
    /// path above.
    // NOTE(review): `lines` is accepted but never read — confirm it was
    // intended for context extraction.
    fn extract_comment_content(
        &self,
        content: &str,
        lines: &[&str],
        start_line: usize,
        start_col: usize,
        patterns: &[&PatternConfig],
        regex_patterns: &[(regex::Regex, &PatternConfig)],
        path: &PathBuf,
    ) -> Option<Vec<TechDebtItem>> {
        let mut items = Vec::new();
        for pattern in patterns {
            // NOTE(review): `unwrap` assumes every configured keyword is a
            // valid regex fragment; a keyword like `C++` would panic —
            // confirm keywords are restricted to plain words.
            let regex = regex::Regex::new(&format!(r"(?i){}", pattern.keyword)).unwrap();
            for mat in regex.find_iter(content) {
                // Translate the byte offset inside the accumulated comment
                // back to a file line/column.
                let line_in_content = content[..mat.start()].lines().count() + start_line;
                let col_in_content = content[..mat.start()].lines().last().map_or(0, |l| l.len());
                let item = TechDebtItem::new(
                    pattern.keyword.clone(),
                    mat.as_str().to_string(),
                    FileLocation {
                        path: path.clone(),
                        line: line_in_content,
                        column: start_col + col_in_content,
                        end_line: None,
                        end_column: None,
                    },
                    self.language.as_str().to_string(),
                    CommentType::MultiLine,
                );
                items.push(item);
            }
        }
        for (regex, pattern) in regex_patterns {
            for mat in regex.find_iter(content) {
                let line_in_content = content[..mat.start()].lines().count() + start_line;
                let col_in_content = content[..mat.start()].lines().last().map_or(0, |l| l.len());
                let item = TechDebtItem::new(
                    pattern.keyword.clone(),
                    mat.as_str().to_string(),
                    FileLocation {
                        path: path.clone(),
                        line: line_in_content,
                        column: start_col + col_in_content,
                        end_line: None,
                        end_column: None,
                    },
                    self.language.as_str().to_string(),
                    CommentType::MultiLine,
                );
                items.push(item);
            }
        }
        if items.is_empty() {
            None
        } else {
            Some(items)
        }
    }
}

View File

@@ -0,0 +1,11 @@
use crate::models::{
AnalysisSummary, ByLanguage, ByPriority, CommentType, Config, FileLocation, Priority,
TechDebtItem,
};
mod language;
pub use language::{Language, LanguageParser};
pub mod analyzer;
pub use analyzer::{summarize, Analyzer};

View File

@@ -0,0 +1,181 @@
use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
use crate::models::{
AnalysisSummary, ByLanguage, ByPriority, ComplexityDistribution, TechDebtItem,
};
/// Renders analysis results as JSON, Markdown, or a console summary.
pub struct Exporter;

impl Default for Exporter {
    // `Exporter` is stateless, so `Default` simply delegates to `new`; it
    // also satisfies clippy's `new_without_default` lint, which fails CI
    // under `-D warnings`.
    fn default() -> Self {
        Self::new()
    }
}

impl Exporter {
    /// Creates a stateless exporter.
    pub fn new() -> Self {
        Self
    }

    /// Writes `items` plus a computed summary as pretty-printed JSON to
    /// `output`, then reports the destination on stdout.
    // Clippy's `ptr_arg` prefers `&Path`, but switching would leave this
    // file's `PathBuf` import unused; allow it explicitly instead.
    #[allow(clippy::ptr_arg)]
    pub fn export_json(&self, items: &[TechDebtItem], output: &PathBuf) -> anyhow::Result<()> {
        let summary = self.create_summary(items);
        let export_data = ExportData {
            summary,
            items: items.to_vec(),
        };
        let json = serde_json::to_string_pretty(&export_data)?;
        let mut file = File::create(output)?;
        file.write_all(json.as_bytes())?;
        println!("Exported {} items to {}", items.len(), output.display());
        Ok(())
    }

    /// Writes a human-readable Markdown report (summary tables, priority
    /// bars, language breakdown, and per-priority item lists) to `output`.
    #[allow(clippy::ptr_arg)]
    pub fn export_markdown(&self, items: &[TechDebtItem], output: &PathBuf) -> anyhow::Result<()> {
        let mut content = String::new();
        content.push_str("# Technical Debt Report\n\n");
        // Fix: chrono provides `to_rfc2822`; the original called the
        // nonexistent `to_rfc2829`, which cannot compile.
        content.push_str(&format!(
            "**Generated:** {}\n\n",
            chrono::Local::now().to_rfc2822()
        ));
        let summary = self.create_summary(items);
        content.push_str("## Summary\n\n");
        content.push_str("| Metric | Count |\n|--------|-------|\n");
        content.push_str(&format!("| Total Items | {} |\n", summary.total_items));
        content.push_str(&format!(
            "| Critical | {} |\n",
            summary.by_priority.critical
        ));
        content.push_str(&format!("| High | {} |\n", summary.by_priority.high));
        content.push_str(&format!("| Medium | {} |\n", summary.by_priority.medium));
        content.push_str(&format!("| Low | {} |\n", summary.by_priority.low));
        content.push('\n');
        content.push_str("## By Priority\n\n");
        content.push_str("| Priority | Count | Bar |\n|---------|-------|-----|\n");
        for (priority_str, count) in [
            ("Critical", summary.by_priority.critical),
            ("High", summary.by_priority.high),
            ("Medium", summary.by_priority.medium),
            ("Low", summary.by_priority.low),
        ] {
            // Fix: repeating the empty string always produced an empty
            // "bar"; use a block glyph, capped at 50 cells.
            let bar = "█".repeat(count.min(50));
            content.push_str(&format!("| {} | {} | {} |\n", priority_str, count, bar));
        }
        content.push('\n');
        content.push_str("## By Language\n\n");
        content.push_str("| Language | Count |\n|----------|-------|\n");
        for lang in &summary.by_language.items {
            content.push_str(&format!("| {} | {} |\n", lang.language, lang.count));
        }
        content.push('\n');
        content.push_str("## Technical Debt Items\n\n");
        let priority_order = ["Critical", "High", "Medium", "Low"];
        for priority_str in priority_order {
            let priority_items: Vec<_> = items
                .iter()
                .filter(|i| i.priority.as_str() == priority_str)
                .collect();
            if !priority_items.is_empty() {
                content.push_str(&format!("### {}\n\n", priority_str));
                // Group entries by file within each priority bucket.
                let mut sorted_items: Vec<_> = priority_items.iter().collect();
                sorted_items.sort_by_key(|i| &i.location.path);
                for item in sorted_items {
                    content.push_str(&format!(
                        "- **{}** at `{}:{}`\n",
                        item.keyword,
                        item.location.path.display(),
                        item.location.line
                    ));
                    content.push_str(&format!("  - {}\n", self.truncate(&item.content, 100)));
                    content.push_str(&format!("  - Complexity: {}/10\n", item.complexity_score));
                    content.push_str(&format!("  - Language: {}\n", item.metadata.language));
                    content.push('\n');
                }
            }
        }
        let mut file = File::create(output)?;
        file.write_all(content.as_bytes())?;
        println!("Exported to {}", output.display());
        Ok(())
    }

    /// Prints a boxed console summary: totals, priority and complexity
    /// breakdowns, and the top five languages.
    pub fn print_summary(&self, items: &[TechDebtItem]) {
        let summary = self.create_summary(items);
        println!();
        println!("═══════════════════════════════════════════");
        println!("       TECHNICAL DEBT ANALYSIS             ");
        println!("═══════════════════════════════════════════");
        println!();
        println!("  Total Items: {}", summary.total_items);
        println!();
        println!("  Priority Breakdown:");
        println!("    🔴 Critical: {}", summary.by_priority.critical);
        println!("    🟠 High:     {}", summary.by_priority.high);
        println!("    🟡 Medium:   {}", summary.by_priority.medium);
        println!("    🟢 Low:      {}", summary.by_priority.low);
        println!();
        println!("  Complexity Distribution:");
        println!("    Low (1-3):     {}", summary.complexity_distribution.low);
        println!(
            "    Medium (4-6):  {}",
            summary.complexity_distribution.medium
        );
        println!(
            "    High (7-8):    {}",
            summary.complexity_distribution.high
        );
        println!(
            "    Critical (9+): {}",
            summary.complexity_distribution.critical
        );
        println!();
        println!("  By Language:");
        for lang in summary.by_language.items.iter().take(5) {
            println!("    {}: {}", lang.language, lang.count);
        }
        if summary.by_language.items.len() > 5 {
            println!("    ... and {} more", summary.by_language.items.len() - 5);
        }
        println!();
        println!("═══════════════════════════════════════════");
    }

    /// Builds the aggregate summary used by all output formats.
    fn create_summary(&self, items: &[TechDebtItem]) -> AnalysisSummary {
        let by_priority = ByPriority::from_items(items);
        let by_language = ByLanguage::from_items(items);
        let complexity_distribution = ComplexityDistribution::from_items(items);
        AnalysisSummary {
            total_items: items.len(),
            by_priority,
            by_language,
            complexity_distribution,
        }
    }

    /// Truncates `s` to at most `max_len` bytes, appending "..." when cut.
    ///
    /// Fixes two panics in the original: `max_len - 3` underflowed for
    /// `max_len < 3`, and `&s[..n]` panicked when `n` fell inside a
    /// multi-byte character (item content comes from arbitrary source
    /// files). We saturate the subtraction and back up to the nearest
    /// char boundary before slicing.
    fn truncate(&self, s: &str, max_len: usize) -> String {
        if s.len() <= max_len {
            return s.to_string();
        }
        let mut end = max_len.saturating_sub(3);
        while end > 0 && !s.is_char_boundary(end) {
            end -= 1;
        }
        format!("{}...", &s[..end])
    }
}
/// Top-level JSON payload: the computed summary plus the raw item list.
#[derive(serde::Serialize)]
struct ExportData {
    summary: AnalysisSummary,
    items: Vec<TechDebtItem>,
}

View File

@@ -0,0 +1,6 @@
use std::path::PathBuf;
use crate::models::{AnalysisSummary, TechDebtItem};
pub mod exporter;
pub use exporter::Exporter;

View File

@@ -0,0 +1,64 @@
use anyhow::Result;
use clap::Parser;
use std::process;
mod cli;
mod core;
mod export;
mod models;
mod tui;
use cli::Args;
use core::analyzer::Analyzer;
use export::exporter::Exporter;
use tui::app::TuiApp;
/// Entry point: delegates all work to `run`, mapping any error to a
/// message on stderr and a non-zero exit code.
fn main() {
    match run() {
        Ok(()) => {}
        Err(err) => {
            eprintln!("Error: {err}");
            process::exit(1);
        }
    }
}
/// Parses CLI arguments and dispatches to the selected subcommand.
///
/// Each analysis-based branch builds a fresh `Analyzer` and re-scans the
/// target directory; errors bubble up to `main` via `anyhow`.
fn run() -> Result<()> {
    let args = Args::parse();
    match &args.command {
        // `analyze`: console summary, or a JSON dump when -o/--output is set.
        cli::Commands::Analyze(analyze_args) => {
            let analyzer = Analyzer::new(&analyze_args.path, &args.config)?;
            let items = analyzer.analyze()?;
            let exporter = Exporter::new();
            if let Some(output) = &analyze_args.output {
                exporter.export_json(&items, output)?;
            } else {
                exporter.print_summary(&items);
            }
        }
        // `tui`: analyze first, then hand the items to the dashboard.
        cli::Commands::Tui(tui_args) => {
            let analyzer = Analyzer::new(&tui_args.path, &args.config)?;
            let items = analyzer.analyze()?;
            let mut app = TuiApp::new(items, tui_args.path.clone());
            app.run()?;
        }
        // `export`: analyze, then write the chosen report format.
        cli::Commands::Export(export_args) => {
            let analyzer = Analyzer::new(&export_args.path, &args.config)?;
            let items = analyzer.analyze()?;
            let exporter = Exporter::new();
            match export_args.format {
                cli::ExportFormat::Json => {
                    exporter.export_json(&items, &export_args.output)?;
                }
                cli::ExportFormat::Markdown => {
                    exporter.export_markdown(&items, &export_args.output)?;
                }
            }
        }
        // `init`: write the default techdebt.yaml; no analysis performed.
        cli::Commands::Init(init_args) => {
            cli::init_config(&init_args.path)?;
        }
    }
    Ok(())
}

View File

@@ -0,0 +1,400 @@
use serde::{Deserialize, Serialize};
use std::cmp::Ordering;
use std::path::PathBuf;
/// Source position of a matched comment. `line`/`column` are 1-based; the
/// `end_*` fields are reserved for ranged matches and are currently always
/// `None` in the parser.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct FileLocation {
    pub path: PathBuf,
    pub line: usize,
    pub column: usize,
    pub end_line: Option<usize>,
    pub end_column: Option<usize>,
}
/// Severity bucket for a tech-debt item. Ordering (Critical highest) is
/// defined by the `Ord` impl below, not by declaration order.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum Priority {
    Critical,
    High,
    Medium,
    Low,
}
impl Priority {
    /// Maps a comment keyword (case-insensitive) to a priority; unknown
    /// keywords default to `Medium`.
    ///
    /// Fixed: `"XXX"` previously appeared in both the High and the Low
    /// arms; the second occurrence was unreachable and tripped rustc's
    /// `unreachable_patterns` warning, failing CI under `-D warnings`.
    /// `"XXX"` stays High, matching the default config in cli.rs.
    pub fn from_keyword(keyword: &str) -> Self {
        match keyword.to_uppercase().as_str() {
            "FIXME" | "BUG" | "ERROR" => Priority::Critical,
            "XXX" | "URGENT" | "CRITICAL" => Priority::High,
            "TODO" | "TEMP" | "PERF" => Priority::Medium,
            "HACK" | "NOTE" | "REFACTOR" => Priority::Low,
            _ => Priority::Medium,
        }
    }

    /// Display label used in reports and for filtering.
    pub fn as_str(&self) -> &'static str {
        match self {
            Priority::Critical => "Critical",
            Priority::High => "High",
            Priority::Medium => "Medium",
            Priority::Low => "Low",
        }
    }
}
impl PartialOrd for Priority {
    // Canonical delegation to `Ord` so both orderings always agree.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for Priority {
    /// Total order over priorities: Critical > High > Medium > Low.
    /// Sorting ascending therefore puts `Low` first; the analyzer sorts
    /// with reversed operands to list critical items first.
    fn cmp(&self, other: &Self) -> Ordering {
        let weight = |p: &Priority| -> u8 {
            match p {
                Priority::Low => 1,
                Priority::Medium => 2,
                Priority::High => 3,
                Priority::Critical => 4,
            }
        };
        weight(self).cmp(&weight(other))
    }
}
/// Syntactic category of the comment a match was found in.
// NOTE(review): the parser only ever emits SingleLine, MultiLine, and
// DocBlock; `Shebang` and the generic `Comment` variant look speculative —
// confirm they are needed (removing them would break serialized data).
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum CommentType {
    SingleLine,
    MultiLine,
    DocBlock,
    Shebang,
    Comment,
}
/// One extracted tech-debt comment plus everything derived from it
/// (priority, heuristic complexity, location, metadata).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TechDebtItem {
    // Quasi-unique id built from path + line + keyword (see `new`).
    pub id: String,
    // The pattern keyword that matched (e.g. "TODO").
    pub keyword: String,
    pub comment_type: CommentType,
    // Comment text starting at the keyword, trimmed to one line.
    pub content: String,
    pub location: FileLocation,
    pub priority: Priority,
    // Heuristic 1-10 score from `calculate_complexity`.
    pub complexity_score: u8,
    // Surrounding source lines; currently never populated by the parser.
    pub context_before: Option<String>,
    pub context_after: Option<String>,
    pub metadata: ItemMetadata,
}
/// Secondary facts about an item, derived from its content at construction.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ItemMetadata {
    // Display name of the source language (e.g. "Rust").
    pub language: String,
    // True when the content contains '?' / '!' respectively.
    pub is_question: bool,
    pub has_exclamation: bool,
    pub word_count: usize,
    // Reserved for VCS blame data; never populated by the parser.
    pub created_at: Option<String>,
    pub author: Option<String>,
}
impl TechDebtItem {
    /// Builds an item from a scanned comment, deriving its priority, a
    /// heuristic complexity score, and content statistics. Context lines and
    /// authorship are left empty for the scanner to fill in later.
    pub fn new(
        keyword: String,
        content: String,
        location: FileLocation,
        language: String,
        comment_type: CommentType,
    ) -> Self {
        // Stable-ish identifier: path + line + keyword.
        let id = format!(
            "{:?}-{:?}-{}",
            location.path,
            location.line,
            keyword
        );
        // Text statistics are computed up front, while `content` is still
        // borrowable.
        let metadata = ItemMetadata {
            language,
            is_question: content.contains('?'),
            has_exclamation: content.contains('!'),
            word_count: content.split_whitespace().count(),
            created_at: None,
            author: None,
        };
        Self {
            id,
            priority: Priority::from_keyword(&keyword),
            complexity_score: calculate_complexity(&content),
            keyword,
            comment_type,
            content,
            location,
            context_before: None,
            context_after: None,
            metadata,
        }
    }
}
/// Heuristic 1-10 complexity score for a debt comment.
///
/// Signals: question marks (open design issues, weighted double),
/// exclamations, length, SHOUTED text, and explicit HACK/WORKAROUND markers.
///
/// Arithmetic is done in `usize` and clamped once at the end. The previous
/// version accumulated in `u8` with `as u8` casts, so pathological inputs
/// (e.g. 128 '?' characters → +256 truncated to 0) wrapped back to a *low*
/// score, and intermediate additions could overflow-panic in debug builds.
fn calculate_complexity(content: &str) -> u8 {
    let mut score: usize = 1;
    let question_count = content.matches('?').count();
    let exclamation_count = content.matches('!').count();
    let word_count = content.split_whitespace().count();
    let uppercase_count = content.chars().filter(|c| c.is_uppercase()).count();
    score += question_count * 2;
    score += exclamation_count;
    score += word_count / 10;
    score += uppercase_count / 5;
    // Long comments usually hide bigger problems.
    if content.len() > 200 {
        score += 2;
    }
    if content.len() > 500 {
        score += 3;
    }
    if content.contains("HACK") || content.contains("WORKAROUND") {
        score += 2;
    }
    // Clamp into the 1-10 scale the rest of the app expects.
    std::cmp::min(score, 10) as u8
}
/// Aggregate view over a full scan: total count plus three breakdowns.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AnalysisSummary {
    pub total_items: usize,
    pub by_priority: ByPriority,
    pub by_language: ByLanguage,
    pub complexity_distribution: ComplexityDistribution,
}
/// Item counts per priority bucket; see `ByPriority::from_items`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ByPriority {
    pub critical: usize,
    pub high: usize,
    pub medium: usize,
    pub low: usize,
}
impl ByPriority {
    /// Counts the items falling into each priority bucket.
    pub fn from_items(items: &[TechDebtItem]) -> Self {
        // Accumulate straight into the result struct instead of four locals.
        let mut tally = Self {
            critical: 0,
            high: 0,
            medium: 0,
            low: 0,
        };
        for item in items {
            let slot = match item.priority {
                Priority::Critical => &mut tally.critical,
                Priority::High => &mut tally.high,
                Priority::Medium => &mut tally.medium,
                Priority::Low => &mut tally.low,
            };
            *slot += 1;
        }
        tally
    }
}
/// Per-language item counts, sorted most-frequent-first by `from_items`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ByLanguage {
    pub items: Vec<LanguageCount>,
}
/// One (language, count) pair inside `ByLanguage`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LanguageCount {
    pub language: String,
    pub count: usize,
}
impl ByLanguage {
    /// Tallies items per source language, most frequent first.
    pub fn from_items(items: &[TechDebtItem]) -> Self {
        let mut counts: std::collections::HashMap<String, usize> =
            std::collections::HashMap::new();
        for item in items {
            *counts
                .entry(item.metadata.language.clone())
                .or_insert(0) += 1;
        }
        let mut items_vec: Vec<LanguageCount> = counts
            .into_iter()
            // The struct field is named `language`; the previous `lang`
            // shorthand matched no field and failed to compile (E0560).
            .map(|(language, count)| LanguageCount { language, count })
            .collect();
        // Sort by count descending, then by name so equal counts come out in
        // a deterministic order (HashMap iteration order is not).
        items_vec.sort_by(|a, b| {
            b.count
                .cmp(&a.count)
                .then_with(|| a.language.cmp(&b.language))
        });
        Self { items: items_vec }
    }
}
/// Item counts bucketed by complexity score (1-3 / 4-6 / 7-8 / 9-10).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ComplexityDistribution {
    pub low: usize,
    pub medium: usize,
    pub high: usize,
    pub critical: usize,
}
impl ComplexityDistribution {
    /// Buckets items by complexity score: 1-3 low, 4-6 medium, 7-8 high,
    /// 9-10 critical. Scores outside 1-10 are ignored.
    pub fn from_items(items: &[TechDebtItem]) -> Self {
        // Accumulate straight into the result struct instead of four locals.
        let mut dist = Self {
            low: 0,
            medium: 0,
            high: 0,
            critical: 0,
        };
        for item in items {
            match item.complexity_score {
                1..=3 => dist.low += 1,
                4..=6 => dist.medium += 1,
                7..=8 => dist.high += 1,
                9..=10 => dist.critical += 1,
                _ => {}
            }
        }
        dist
    }
}
/// Scanner configuration (typically loaded from `techdebt.yaml`; see
/// `Default` for the built-in fallback).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    // Keyword patterns to search for, each with its priority.
    pub patterns: Vec<PatternConfig>,
    pub languages: Vec<String>,
    // Glob patterns for paths to skip.
    pub ignore: Vec<String>,
    pub extensions: Vec<String>,
    pub complexity: ComplexityConfig,
    pub export: ExportConfig,
}
impl Default for Config {
    /// Built-in configuration used when no config file is present: six stock
    /// keywords, common languages/extensions, and ignore globs for typical
    /// build/vendor directories.
    fn default() -> Self {
        Self {
            patterns: vec![
                PatternConfig {
                    keyword: "FIXME".to_string(),
                    priority: Priority::Critical,
                    regex: false,
                },
                PatternConfig {
                    keyword: "TODO".to_string(),
                    priority: Priority::Medium,
                    regex: false,
                },
                PatternConfig {
                    keyword: "HACK".to_string(),
                    priority: Priority::Low,
                    regex: false,
                },
                PatternConfig {
                    keyword: "BUG".to_string(),
                    priority: Priority::Critical,
                    regex: false,
                },
                PatternConfig {
                    keyword: "XXX".to_string(),
                    priority: Priority::High,
                    regex: false,
                },
                PatternConfig {
                    keyword: "NOTE".to_string(),
                    priority: Priority::Low,
                    regex: false,
                },
            ],
            languages: vec![
                "javascript".to_string(),
                "typescript".to_string(),
                "python".to_string(),
                "rust".to_string(),
                "go".to_string(),
                "java".to_string(),
                "c".to_string(),
                "cpp".to_string(),
                "ruby".to_string(),
            ],
            // Dependency, VCS and build-output directories are never scanned.
            ignore: vec![
                "node_modules/**".to_string(),
                "target/**".to_string(),
                ".git/**".to_string(),
                "vendor/**".to_string(),
                "dist/**".to_string(),
                "build/**".to_string(),
            ],
            extensions: vec![
                ".js".to_string(),
                ".ts".to_string(),
                ".jsx".to_string(),
                ".tsx".to_string(),
                ".py".to_string(),
                ".rs".to_string(),
                ".go".to_string(),
                ".java".to_string(),
                ".c".to_string(),
                ".cpp".to_string(),
                ".h".to_string(),
                ".hpp".to_string(),
                ".rb".to_string(),
                ".md".to_string(),
            ],
            complexity: ComplexityConfig::default(),
            export: ExportConfig::default(),
        }
    }
}
/// One keyword to search for. When `regex` is true the keyword is presumably
/// treated as a regular expression by the scanner — confirm against the
/// matcher implementation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PatternConfig {
    pub keyword: String,
    pub priority: Priority,
    pub regex: bool,
}
/// Knobs for complexity scoring.
/// NOTE(review): `calculate_complexity` in this module hard-codes its weights
/// and does not read this struct — confirm where (or whether) it is applied.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ComplexityConfig {
    pub enabled: bool,
    pub max_comment_length: usize,
    pub question_weight: u8,
    pub exclamation_weight: u8,
}
impl Default for ComplexityConfig {
    // Defaults mirror the hard-coded heuristics in `calculate_complexity`.
    fn default() -> Self {
        Self {
            enabled: true,
            max_comment_length: 500,
            question_weight: 2,
            exclamation_weight: 1,
        }
    }
}
/// Toggles for what gets written by exporters.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExportConfig {
    pub include_metadata: bool,
    pub include_context: bool,
}
impl Default for ExportConfig {
    // Everything on by default; exports are opt-out.
    fn default() -> Self {
        Self {
            include_metadata: true,
            include_context: true,
        }
    }
}

View File

@@ -0,0 +1,146 @@
use crate::tui::{render, TuiState};
use crossterm::{
event::{self, Event, KeyCode, KeyEventKind},
terminal::{disable_raw_mode, enable_raw_mode, Clear, ClearType},
ExecutableCommand,
};
use ratatui::{backend::CrosstermBackend, Terminal};
use std::io::Stdout;
use std::path::PathBuf;
use crate::models::TechDebtItem;
/// Owns the TUI: application state plus the crossterm-backed terminal it
/// draws into (stdout).
pub struct TuiApp {
    state: TuiState,
    terminal: Terminal<CrosstermBackend<Stdout>>,
}
impl TuiApp {
pub fn new(items: Vec<TechDebtItem>, path: PathBuf) -> Self {
let state = TuiState::new(items, path);
let terminal = Terminal::new(CrosstermBackend::new(std::io::stdout()))
.expect("Failed to create terminal");
Self { state, terminal }
}
pub fn run(&mut self) -> anyhow::Result<()> {
enable_raw_mode()?;
std::io::stdout().execute(Clear(ClearType::All))?;
loop {
self.terminal.draw(|f| render(f, &self.state))?;
if let Event::Key(key) = event::read()? {
if key.kind == KeyEventKind::Press {
match self.handle_input(key.code) {
Ok(should_exit) => {
if should_exit {
break;
}
}
Err(e) => {
eprintln!("Error handling input: {e}");
}
}
}
}
}
disable_raw_mode()?;
std::io::stdout().execute(Clear(ClearType::All))?;
Ok(())
}
fn handle_input(&mut self, code: KeyCode) -> anyhow::Result<bool> {
let mut should_exit = false;
match code {
KeyCode::Char('q') | KeyCode::Esc => {
should_exit = true;
}
KeyCode::Tab => {
self.state.current_view = match self.state.current_view {
crate::tui::View::Dashboard => crate::tui::View::List,
crate::tui::View::List => crate::tui::View::Dashboard,
crate::tui::View::Detail => crate::tui::View::List,
crate::tui::View::Export => crate::tui::View::Dashboard,
};
}
KeyCode::Up => {
if self.state.selected_index > 0 {
self.state.selected_index -= 1;
}
}
KeyCode::Down => {
let filtered_len = self.state.filtered_items().len();
if filtered_len > 0 && self.state.selected_index < filtered_len - 1 {
self.state.selected_index += 1;
}
}
KeyCode::Enter => {
if self.state.current_view == crate::tui::View::List {
self.state.current_view = crate::tui::View::Detail;
}
}
KeyCode::Char('/') => {
self.enable_filter_mode()?;
}
KeyCode::Char('f') => {
self.enable_filter_mode()?;
}
KeyCode::Char('s') => {
self.state.sort_order = match self.state.sort_order {
crate::tui::SortOrder::Priority => crate::tui::SortOrder::File,
crate::tui::SortOrder::File => crate::tui::SortOrder::Line,
crate::tui::SortOrder::Line => crate::tui::SortOrder::Keyword,
crate::tui::SortOrder::Keyword => crate::tui::SortOrder::Priority,
};
}
KeyCode::Char('c') => {
self.state.filter_priority = None;
self.state.filter_text.clear();
}
KeyCode::Char('1') => {
self.state.filter_priority = Some(crate::models::Priority::Critical);
}
KeyCode::Char('2') => {
self.state.filter_priority = Some(crate::models::Priority::High);
}
KeyCode::Char('3') => {
self.state.filter_priority = Some(crate::models::Priority::Medium);
}
KeyCode::Char('4') => {
self.state.filter_priority = Some(crate::models::Priority::Low);
}
_ => {}
}
Ok(should_exit)
}
fn enable_filter_mode(&mut self) -> anyhow::Result<()> {
disable_raw_mode()?;
println!("Enter filter text (or press ESC to cancel): ");
std::io::stdout().flush()?;
let mut input = String::new();
std::io::stdin().read_line(&mut input)?;
input = input.trim_end().to_string();
if input.is_empty() || input.as_bytes().first() == Some(&0x1b) {
self.state.filter_text.clear();
} else {
self.state.filter_text = input;
}
enable_raw_mode()?;
std::io::stdout().execute(Clear(ClearType::All))?;
Ok(())
}
}

View File

@@ -0,0 +1,544 @@
use crossterm::{
terminal::{disable_raw_mode, enable_raw_mode, Clear},
ExecutableCommand,
};
use ratatui::{
backend::CrosstermBackend,
layout::{Constraint, Direction, Layout, Rect},
style::{Color, Modifier, Style, Stylize},
text::{Line, Span},
widgets::{
Block, Borders, Cell, Gauge, List, ListItem, Paragraph, Row, Table, Wrap,
},
Frame, Terminal,
};
use std::io::Stdout;
use std::path::PathBuf;
use crate::models::{AnalysisSummary, ByLanguage, ByPriority, Priority, TechDebtItem};
pub mod app;
pub use app::TuiApp;
/// Which screen the TUI is currently showing.
#[derive(Debug, Clone, PartialEq)]
pub enum View {
    Dashboard,
    List,
    Detail,
    // Placeholder screen; `render` only shows a "coming soon" message for it.
    Export,
}
/// Sort key applied to the list view.
///
/// Must be `pub`: it is exposed through the public `TuiState::sort_order`
/// field and cycled from the `tui::app` module via `crate::tui::SortOrder::*`.
/// As a private type it is rejected by the compiler (E0446 "private type in
/// public interface" / E0603 at the use sites).
#[derive(Debug, Clone, PartialEq)]
pub enum SortOrder {
    Priority,
    File,
    Line,
    Keyword,
}
/// All mutable UI state: the scanned items plus view, selection, filter and
/// sort settings.
pub struct TuiState {
    pub items: Vec<TechDebtItem>,
    pub current_view: View,
    // Index into `filtered_items()`, not into `items`.
    pub selected_index: usize,
    // Case-insensitive substring filter over content and path.
    pub filter_text: String,
    pub filter_priority: Option<Priority>,
    pub sort_order: SortOrder,
    // Set but not read by any code visible in this module.
    pub show_help: bool,
    pub path: PathBuf,
}
impl TuiState {
    /// Fresh state: dashboard view, no filters, priority sort.
    pub fn new(items: Vec<TechDebtItem>, path: PathBuf) -> Self {
        Self {
            items,
            current_view: View::Dashboard,
            selected_index: 0,
            filter_text: String::new(),
            filter_priority: None,
            sort_order: SortOrder::Priority,
            show_help: false,
            path,
        }
    }

    /// Items passing the text and priority filters, in the current sort
    /// order. Returns borrows into `self.items`; callers index the result
    /// with `selected_index`.
    pub fn filtered_items(&self) -> Vec<&TechDebtItem> {
        let mut items: Vec<&TechDebtItem> = self.items.iter().collect();
        if !self.filter_text.is_empty() {
            // Lowercase the needle once instead of once per item.
            let needle = self.filter_text.to_lowercase();
            items.retain(|item| {
                item.content.to_lowercase().contains(&needle)
                    || item
                        .location
                        .path
                        .to_string_lossy()
                        .to_lowercase()
                        .contains(&needle)
            });
        }
        // Borrow the filter: `Priority` is not `Copy`, so binding it by
        // value here would try to move out of `&self` and fail to compile
        // (E0507: cannot move out of a shared reference).
        if let Some(priority) = &self.filter_priority {
            items.retain(|item| item.priority == *priority);
        }
        match self.sort_order {
            SortOrder::Priority => items.sort_by(|a, b| b.priority.cmp(&a.priority)),
            SortOrder::File => items.sort_by(|a, b| {
                a.location
                    .path
                    .cmp(&b.location.path)
                    .then_with(|| a.location.line.cmp(&b.location.line))
            }),
            SortOrder::Line => items.sort_by(|a, b| a.location.line.cmp(&b.location.line)),
            SortOrder::Keyword => items.sort_by(|a, b| a.keyword.cmp(&b.keyword)),
        }
        items
    }

    /// Aggregated counts for the dashboard, computed over *all* items
    /// (active filters are intentionally ignored here).
    pub fn summary(&self) -> AnalysisSummary {
        let by_priority = ByPriority::from_items(&self.items);
        let by_language = ByLanguage::from_items(&self.items);
        let complexity_distribution =
            crate::models::ComplexityDistribution::from_items(&self.items);
        AnalysisSummary {
            total_items: self.items.len(),
            by_priority,
            by_language,
            complexity_distribution,
        }
    }
}
/// Renders the summary dashboard: priority gauges, total count, complexity
/// distribution, per-language table, and navigation hints.
///
/// NOTE(review): several calls here are sensitive to the pinned ratatui
/// version — `Rect`'s `.inner` used as a field (recent ratatui exposes
/// `inner(Margin)` as a method), `Gauge::label(&String)`, `Frame` carrying a
/// backend type parameter, and rendering `Line` directly as a widget.
/// Confirm these compile against the version locked in Cargo.toml.
fn draw_dashboard(f: &mut Frame<CrosstermBackend<Stdout>>, state: &TuiState, area: Rect) {
    let summary = state.summary();
    // Header / body / status-bar rows.
    let chunks = Layout::default()
        .direction(Direction::Vertical)
        .constraints([
            Constraint::Length(3),
            Constraint::Min(10),
            Constraint::Length(3),
        ])
        .split(area);
    let title = Paragraph::new("TECHDEBT TRACKER - Dashboard")
        .style(Style::default().fg(Color::Cyan).bold());
    f.render_widget(title, chunks[0]);
    // Body is split into a left column (priorities, totals, complexity) and
    // a right column (languages, help).
    let main_content = Layout::default()
        .direction(Direction::Horizontal)
        .constraints([Constraint::Percentage(50), Constraint::Percentage(50)])
        .split(chunks[1]);
    let left_content = Layout::default()
        .direction(Direction::Vertical)
        .constraints([
            Constraint::Length(4),
            Constraint::Length(4),
            Constraint::Length(4),
            Constraint::Min(0),
        ])
        .split(main_content[0]);
    let right_content = Layout::default()
        .direction(Direction::Vertical)
        .constraints([
            Constraint::Length(4),
            Constraint::Length(4),
            Constraint::Min(0),
        ])
        .split(main_content[1]);
    let priority_block = Block::default()
        .title("Priority Breakdown")
        .borders(Borders::ALL)
        .style(Style::default().fg(Color::White));
    f.render_widget(priority_block, left_content[0]);
    let priority_content = Layout::default()
        .direction(Direction::Vertical)
        .constraints([
            Constraint::Length(1),
            Constraint::Length(1),
            Constraint::Length(1),
            Constraint::Length(1),
        ])
        .split(left_content[0].inner);
    // Each gauge shows its bucket's share of the total; empty scans render
    // a zero ratio to avoid dividing by zero.
    let critical_gauge = Gauge::default()
        .gauge_style(Style::default().fg(Color::Red))
        .label(&format!("Critical: {}", summary.by_priority.critical))
        .ratio(if summary.total_items > 0 {
            summary.by_priority.critical as f64 / summary.total_items as f64
        } else {
            0.0
        });
    f.render_widget(critical_gauge, priority_content[0]);
    let high_gauge = Gauge::default()
        .gauge_style(Style::default().fg(Color::Yellow))
        .label(&format!("High: {}", summary.by_priority.high))
        .ratio(if summary.total_items > 0 {
            summary.by_priority.high as f64 / summary.total_items as f64
        } else {
            0.0
        });
    f.render_widget(high_gauge, priority_content[1]);
    let medium_gauge = Gauge::default()
        .gauge_style(Style::default().fg(Color::Blue))
        .label(&format!("Medium: {}", summary.by_priority.medium))
        .ratio(if summary.total_items > 0 {
            summary.by_priority.medium as f64 / summary.total_items as f64
        } else {
            0.0
        });
    f.render_widget(medium_gauge, priority_content[2]);
    let low_gauge = Gauge::default()
        .gauge_style(Style::default().fg(Color::Green))
        .label(&format!("Low: {}", summary.by_priority.low))
        .ratio(if summary.total_items > 0 {
            summary.by_priority.low as f64 / summary.total_items as f64
        } else {
            0.0
        });
    f.render_widget(low_gauge, priority_content[3]);
    let total_block = Block::default()
        .title("Total Items")
        .borders(Borders::ALL)
        .style(Style::default().fg(Color::White));
    f.render_widget(total_block, left_content[1]);
    let total_text = Paragraph::new(format!("{}", summary.total_items))
        .style(Style::default().fg(Color::Cyan).bold());
    f.render_widget(total_text, left_content[1].inner);
    let complexity_block = Block::default()
        .title("Complexity Distribution")
        .borders(Borders::ALL)
        .style(Style::default().fg(Color::White));
    f.render_widget(complexity_block, left_content[2]);
    let complexity_text = format!(
        "Low: {} Medium: {} High: {} Critical: {}",
        summary.complexity_distribution.low,
        summary.complexity_distribution.medium,
        summary.complexity_distribution.high,
        summary.complexity_distribution.critical
    );
    let complexity_para = Paragraph::new(complexity_text);
    f.render_widget(complexity_para, left_content[2].inner);
    let language_block = Block::default()
        .title("By Language")
        .borders(Borders::ALL)
        .style(Style::default().fg(Color::White));
    f.render_widget(language_block, right_content[0]);
    // Only the five most frequent languages fit in this panel.
    let lang_rows: Vec<Row> = summary
        .by_language
        .items
        .iter()
        .take(5)
        .map(|lang| {
            Row::new(vec![
                Cell::from(lang.language.clone()),
                Cell::from(lang.count.to_string()),
            ])
        })
        .collect();
    let lang_table = Table::new(lang_rows)
        .widths(&[Constraint::Percentage(60), Constraint::Percentage(40)])
        .column_spacing(1);
    f.render_widget(lang_table, right_content[0].inner);
    let help_block = Block::default()
        .title("Navigation Help")
        .borders(Borders::ALL)
        .style(Style::default().fg(Color::White));
    f.render_widget(help_block, right_content[1]);
    let help_text = "↑↓: Navigate | Tab: Switch View | q: Quit | /: Filter";
    let help_para = Paragraph::new(help_text);
    f.render_widget(help_para, right_content[1].inner);
    let status_bar = Line::from(vec![
        Span::styled(" Press ", Style::default().fg(Color::Gray)),
        Span::styled("TAB", Style::default().fg(Color::Yellow).bold()),
        Span::styled(" to view items | ", Style::default().fg(Color::Gray)),
        Span::styled("q", Style::default().fg(Color::Yellow).bold()),
        Span::styled(" to quit", Style::default().fg(Color::Gray)),
    ]);
    f.render_widget(status_bar, chunks[2]);
}
/// Renders the scrollable item list with the filter header and key hints.
///
/// Shows a window of items around the selection (starting up to 10 entries
/// above it), colored by priority, with the selected row highlighted.
fn draw_list(f: &mut Frame<CrosstermBackend<Stdout>>, state: &TuiState, area: Rect) {
    let filtered = state.filtered_items();
    let chunks = Layout::default()
        .direction(Direction::Vertical)
        .constraints([
            Constraint::Length(3),
            Constraint::Min(10),
            Constraint::Length(3),
        ])
        .split(area);
    let filter_text = if state.filter_text.is_empty() {
        format!("Filter ({} items) - Press / to filter", filtered.len())
    } else {
        format!(
            "Filter: {} - {} results (Press / to edit, ESC to clear)",
            state.filter_text,
            filtered.len()
        )
    };
    let filter_block = Paragraph::new(filter_text)
        .style(Style::default().fg(Color::Yellow));
    f.render_widget(filter_block, chunks[0]);
    let list_items: Vec<ListItem> = filtered
        .iter()
        .enumerate()
        .skip(state.selected_index.saturating_sub(10))
        // `saturating_sub` here: on a terminal shorter than two rows the old
        // `height as usize - 2` underflowed (panic in debug builds).
        .take((chunks[1].height as usize).saturating_sub(2))
        .map(|(idx, item)| {
            let priority_color = match item.priority {
                Priority::Critical => Color::Red,
                Priority::High => Color::Yellow,
                Priority::Medium => Color::Blue,
                Priority::Low => Color::Green,
            };
            let content = format!(
                "[{}] {}:{} - {} ({}/10) - {}",
                item.keyword,
                item.location.path.file_name().unwrap_or_default().to_string_lossy(),
                item.location.line,
                item.content.chars().take(40).collect::<String>(),
                item.complexity_score,
                item.metadata.language
            );
            // `idx` is a plain usize from `enumerate()`; the old `*idx`
            // dereferenced a non-pointer and did not compile (E0614).
            let style = if idx == state.selected_index {
                Style::default()
                    .fg(priority_color)
                    .bg(Color::DarkGray)
                    .add_modifier(Modifier::BOLD)
            } else {
                Style::default().fg(priority_color)
            };
            ListItem::new(content).style(style)
        })
        .collect();
    let list = List::new(list_items).block(
        Block::default()
            .title(format!("Items ({})", filtered.len()))
            .borders(Borders::ALL),
    );
    f.render_widget(list, chunks[1]);
    let status_bar = Line::from(vec![
        Span::styled(" ↑↓: Navigate | ", Style::default().fg(Color::Gray)),
        Span::styled("ENTER", Style::default().fg(Color::Yellow).bold()),
        Span::styled(": View Detail | ", Style::default().fg(Color::Gray)),
        Span::styled("TAB", Style::default().fg(Color::Yellow).bold()),
        Span::styled(": Dashboard | ", Style::default().fg(Color::Gray)),
        Span::styled("f", Style::default().fg(Color::Yellow).bold()),
        Span::styled(": Filter | ", Style::default().fg(Color::Gray)),
        Span::styled("s", Style::default().fg(Color::Yellow).bold()),
        Span::styled(": Sort | ", Style::default().fg(Color::Gray)),
        Span::styled("q", Style::default().fg(Color::Yellow).bold()),
        Span::styled(": Quit", Style::default().fg(Color::Gray)),
    ]);
    f.render_widget(status_bar, chunks[2]);
}
/// Renders the detail screen for the currently selected (filtered) item.
///
/// NOTE(review): like the other draw functions, several ratatui calls here
/// (`.inner` as a field, rendering `Line` as a widget, `Paragraph::new(&String)`)
/// are version-sensitive — confirm against the pinned ratatui version.
fn draw_detail(f: &mut Frame<CrosstermBackend<Stdout>>, state: &TuiState, area: Rect) {
    let filtered = state.filtered_items();
    if filtered.is_empty() {
        let para = Paragraph::new("No items selected");
        f.render_widget(para, area);
        return;
    }
    // `state` is a shared borrow, so the selection cannot be reset here: the
    // old `state.selected_index = 0;` assigned through `&TuiState` and did
    // not compile (E0594). Fall back to the first item locally instead; the
    // rendered output is identical.
    let item = match filtered.get(state.selected_index).or_else(|| filtered.first()) {
        Some(item) => item,
        None => return,
    };
    let chunks = Layout::default()
        .direction(Direction::Vertical)
        .constraints([
            Constraint::Length(3),
            Constraint::Min(5),
            Constraint::Length(3),
        ])
        .split(area);
    let title = format!(
        "Detail - {} at {}:{}",
        item.keyword,
        item.location.path.display(),
        item.location.line
    );
    let title_block = Paragraph::new(title)
        .style(Style::default().fg(Color::Cyan).bold());
    f.render_widget(title_block, chunks[0]);
    // Left half: scoring and content; right half: location and text stats.
    let detail_chunks = Layout::default()
        .direction(Direction::Horizontal)
        .constraints([Constraint::Percentage(50), Constraint::Percentage(50)])
        .split(chunks[1]);
    let left_details = Layout::default()
        .direction(Direction::Vertical)
        .constraints([
            Constraint::Length(2),
            Constraint::Length(2),
            Constraint::Length(2),
            Constraint::Length(2),
            Constraint::Min(0),
        ])
        .split(detail_chunks[0]);
    let priority_color = match item.priority {
        Priority::Critical => Color::Red,
        Priority::High => Color::Yellow,
        Priority::Medium => Color::Blue,
        Priority::Low => Color::Green,
    };
    let priority_line = Line::from(vec![
        Span::styled("Priority: ", Style::default().fg(Color::Gray)),
        Span::styled(item.priority.as_str(), Style::default().fg(priority_color).bold()),
    ]);
    f.render_widget(priority_line, left_details[0]);
    let complexity_line = Line::from(vec![
        Span::styled("Complexity: ", Style::default().fg(Color::Gray)),
        Span::styled(
            format!("{}/10", item.complexity_score),
            Style::default()
                .fg(match item.complexity_score {
                    1..=3 => Color::Green,
                    4..=6 => Color::Yellow,
                    7..=8 => Color::Red,
                    _ => Color::Magenta,
                })
                .bold(),
        ),
    ]);
    f.render_widget(complexity_line, left_details[1]);
    let language_line = Line::from(vec![
        Span::styled("Language: ", Style::default().fg(Color::Gray)),
        Span::styled(&item.metadata.language, Style::default().fg(Color::Cyan)),
    ]);
    f.render_widget(language_line, left_details[2]);
    let type_line = Line::from(vec![
        Span::styled("Type: ", Style::default().fg(Color::Gray)),
        Span::styled(
            format!("{:?}", item.comment_type),
            Style::default().fg(Color::Blue),
        ),
    ]);
    f.render_widget(type_line, left_details[3]);
    let content_block = Block::default().title("Content").borders(Borders::ALL);
    f.render_widget(content_block, left_details[4]);
    let content_para = Paragraph::new(&item.content)
        .wrap(Wrap { trim: true });
    f.render_widget(content_para, left_details[4].inner);
    let right_details = Layout::default()
        .direction(Direction::Vertical)
        .constraints([
            Constraint::Length(2),
            Constraint::Length(2),
            Constraint::Length(2),
            Constraint::Min(0),
        ])
        .split(detail_chunks[1]);
    let path_line = Line::from(vec![
        Span::styled("File: ", Style::default().fg(Color::Gray)),
        Span::styled(
            item.location.path.display().to_string(),
            Style::default().fg(Color::White),
        ),
    ]);
    f.render_widget(path_line, right_details[0]);
    let location_line = Line::from(vec![
        Span::styled("Line: ", Style::default().fg(Color::Gray)),
        Span::styled(
            item.location.line.to_string(),
            Style::default().fg(Color::White),
        ),
        Span::styled(" | Column: ", Style::default().fg(Color::Gray)),
        Span::styled(
            item.location.column.to_string(),
            Style::default().fg(Color::White),
        ),
    ]);
    f.render_widget(location_line, right_details[1]);
    let question_line = Line::from(vec![
        Span::styled("Contains Question: ", Style::default().fg(Color::Gray)),
        Span::styled(
            if item.metadata.is_question { "Yes" } else { "No" },
            Style::default().fg(if item.metadata.is_question {
                Color::Yellow
            } else {
                Color::Green
            }),
        ),
    ]);
    f.render_widget(question_line, right_details[2]);
    let word_count_line = Line::from(vec![
        Span::styled("Word Count: ", Style::default().fg(Color::Gray)),
        Span::styled(item.metadata.word_count.to_string(), Style::default().fg(Color::White)),
    ]);
    f.render_widget(word_count_line, right_details[3]);
    let status_bar = Line::from(vec![
        Span::styled(" ←: Back | ", Style::default().fg(Color::Gray)),
        Span::styled("e", Style::default().fg(Color::Yellow).bold()),
        Span::styled(": Export | ", Style::default().fg(Color::Gray)),
        Span::styled("q", Style::default().fg(Color::Yellow).bold()),
        Span::styled(": Quit", Style::default().fg(Color::Gray)),
    ]);
    f.render_widget(status_bar, chunks[2]);
}
/// Top-level draw: paints a black background over the whole frame, then
/// delegates to the renderer for `state.current_view`.
///
/// NOTE(review): `f.size()` and `Frame<CrosstermBackend<Stdout>>` are
/// version-sensitive ratatui APIs — confirm against the pinned version.
pub fn render(f: &mut Frame<CrosstermBackend<Stdout>>, state: &TuiState) {
    let area = f.size();
    let block = Block::default()
        .style(Style::default().bg(Color::Black))
        .borders(Borders::NONE);
    f.render_widget(block, area);
    match state.current_view {
        View::Dashboard => draw_dashboard(f, state, area),
        View::List => draw_list(f, state, area),
        View::Detail => draw_detail(f, state, area),
        // Placeholder until the export screen exists.
        View::Export => {
            let para = Paragraph::new("Export feature - Coming soon!");
            f.render_widget(para, area);
        }
    }
}

View File

@@ -0,0 +1,52 @@
use std::path::PathBuf;
use techdebt_tracker_cli::models::{Priority, TechDebtItem, FileLocation};
#[test]
fn test_priority_from_keyword() {
    // Table-driven: each stock keyword maps to its expected bucket.
    let cases = [
        ("FIXME", Priority::Critical),
        ("BUG", Priority::Critical),
        ("TODO", Priority::Medium),
        ("HACK", Priority::Low),
        ("XXX", Priority::High),
        ("NOTE", Priority::Low),
    ];
    for (keyword, expected) in cases.iter() {
        assert_eq!(Priority::from_keyword(keyword), *expected);
    }
}
#[test]
fn test_priority_ordering() {
    // Severity strictly increases: Low < Medium < High < Critical.
    assert!(Priority::Low < Priority::Medium);
    assert!(Priority::Medium < Priority::High);
    assert!(Priority::High < Priority::Critical);
    assert!(Priority::Critical > Priority::Low);
}
#[test]
fn test_priority_as_str() {
    // Labels are the capitalized variant names.
    let labels = [
        (Priority::Critical, "Critical"),
        (Priority::High, "High"),
        (Priority::Medium, "Medium"),
        (Priority::Low, "Low"),
    ];
    for (priority, label) in labels.iter() {
        assert_eq!(priority.as_str(), *label);
    }
}
#[test]
fn test_tech_debt_item_creation() {
    // A FIXME at a known location should come out Critical with a non-empty id.
    let loc = FileLocation {
        path: PathBuf::from("/test/file.rs"),
        line: 10,
        column: 5,
        end_line: None,
        end_column: None,
    };
    let item = TechDebtItem::new(
        "FIXME".to_owned(),
        "This is a test fixme".to_owned(),
        loc,
        "Rust".to_owned(),
        techdebt_tracker_cli::models::CommentType::SingleLine,
    );
    assert_eq!(item.keyword, "FIXME");
    assert_eq!(item.priority, Priority::Critical);
    assert!(!item.id.is_empty());
}

View File

@@ -0,0 +1,47 @@
use assert_cmd::Command;
use std::path::PathBuf;
#[test]
fn test_cli_help() {
    // `--help` must exit successfully.
    Command::cargo_bin("techdebt-tracker-cli")
        .unwrap()
        .arg("--help")
        .assert()
        .success();
}
#[test]
fn test_cli_version() {
    // `--version` must exit successfully.
    Command::cargo_bin("techdebt-tracker-cli")
        .unwrap()
        .arg("--version")
        .assert()
        .success();
}
#[test]
fn test_init_command() {
    // `init` should drop a `techdebt.yaml` skeleton into the target directory
    // containing at least the patterns and languages sections.
    let temp_dir = tempfile::tempdir().unwrap();
    let project_dir = temp_dir.path().to_path_buf();
    Command::cargo_bin("techdebt-tracker-cli")
        .unwrap()
        .arg("init")
        .arg("--path")
        .arg(&project_dir)
        .assert()
        .success();
    let config_path = project_dir.join("techdebt.yaml");
    assert!(config_path.exists());
    let content = std::fs::read_to_string(&config_path).unwrap();
    assert!(content.contains("patterns:"));
    assert!(content.contains("languages:"));
}
#[test]
fn test_analyze_command_nonexistent_path() {
    // Analyzing a path that does not exist must fail, not exit 0.
    let mut analyze = Command::cargo_bin("techdebt-tracker-cli").unwrap();
    analyze.arg("analyze");
    analyze.arg("/nonexistent/path");
    analyze.assert().failure();
}

View File

@@ -0,0 +1,81 @@
// JavaScript sample file with technical debt comments
// Scanner fixture: the keyword comment below is intentional test data —
// do not reword it.
function calculateTotal(items) {
    // TODO: This should use reduce instead of a loop
    let total = 0;
    for (let i = 0; i < items.length; i++) {
        total += items[i].price;
    }
    return total;
}
// Scanner fixture: two adjacent keyword comments on purpose.
function fetchUserData(userId) {
    // FIXME: API error handling is missing
    // FIXME: This endpoint may return null
    return fetch(`/api/users/${userId}`)
        .then(response => response.json());
}
// Scanner fixture: mixes two different keyword comments in one body.
function processPayment(amount) {
    // HACK: Quick fix for holiday season
    if (amount > 1000) {
        console.log("High value transaction");
    }
    // TODO: Implement proper payment processing
}
// Scanner fixture: a single low-severity keyword comment.
function createUser(name, email) {
    // NOTE: Email validation is basic
    if (!email.includes('@')) {
        throw new Error('Invalid email');
    }
    return { name, email };
}
// Scanner fixture: keyword comments placed inside class members.
class DataProcessor {
    constructor() {
        this.data = [];
        // XXX: Memory leak - data is never cleared
    }
    addItem(item) {
        this.data.push(item);
    }
    process() {
        // BUG: This doesn't handle edge cases
        return this.data.map(x => x * 2);
    }
}
// TODO: Add JSDoc comments
// TODO: Write unit tests
// Scanner fixture: keyword comment inside an async function.
async function loadConfig() {
    // FIXME: Hardcoded path should be configurable
    const response = await fetch('/config.json');
    return response.json();
}
// Scanner fixture: exercises the TEMP keyword.
function temporaryFix() {
    // TEMP: Remove this after Q1
    return { status: 'pending' };
}
// Scanner fixture: exercises the REFACTOR keyword.
function oldCode() {
    // REFACTOR: This code is legacy
    let result = 0;
    for (let i = 0; i < 10; i++) {
        result += i;
    }
    return result;
}
// Public surface of this fixture module; helpers used only for scanning
// stay unexported.
module.exports = {
    calculateTotal,
    fetchUserData,
    processPayment,
    createUser,
    DataProcessor,
    loadConfig,
};

View File

@@ -0,0 +1,94 @@
# Python sample file with technical debt comments
# Scanner fixture: the keyword comment below is intentional test data —
# do not reword it.
def calculate_average(numbers):
    # TODO: Handle empty list case
    return sum(numbers) / len(numbers)
# Scanner fixture: single keyword comment in a dict-building function.
def process_user(user_data):
    # FIXME: This may raise KeyError for missing fields
    name = user_data['name']
    email = user_data['email']
    return {'name': name, 'email': email}
# Scanner fixture: two keyword comments around a (deliberately sketchy)
# request call.
def fetch_data_from_api(endpoint):
    # HACK: Skip SSL verification for testing
    import requests
    response = requests.get(endpoint, verify=False)
    # TODO: Add retry logic
    return response.json()
# Scanner fixture: keyword comments placed inside class methods.
class DatabaseConnection:
    def __init__(self, connection_string):
        # FIXME: Connection string should be encrypted
        self.connection_string = connection_string
    def connect(self):
        # BUG: Connection timeout not implemented
        print("Connecting to database...")
    def disconnect(self):
        # NOTE: Pool cleanup happens automatically
        print("Disconnecting...")
# Scanner fixture: exercises the TEMP keyword.
def temp_workaround():
    # TEMP: Quick fix for production issue
    return None
# Scanner fixture: exercises the REFACTOR keyword.
def old_implementation():
    # REFACTOR: Use list comprehension instead
    result = []
    for i in range(10):
        result.append(i * 2)
    return result
# Scanner fixture: a deliberately unsafe eval with high-severity keyword
# comments above it — leave both the comments and the eval as-is.
def validate_input(data):
    # XXX: Critical security vulnerability!
    # This eval is dangerous
    return eval(data)  # nosec
# Scanner fixture: several stacked keyword comments in one function.
def complex_function():
    # TODO: This function is too long, split it up
    # TODO: Add type hints
    # TODO: Add docstring
    x = 1
    y = 2
    z = 3
    a = 4
    b = 5
    return x + y + z + a + b
# Scanner fixture: mixes a docstring with keyword comments inside methods.
class LegacyClass:
    """This class needs refactoring."""
    def __init__(self):
        self._internal_state = None
        # FIXME: Memory leak risk
    def _old_method(self):
        # NOTE: This is deprecated
        pass
    def new_method(self):
        """Modern replacement for _old_method."""
        pass
# TODO: Add exception handling
# TODO: Write docstrings for all public methods
# TODO: Add unit tests
# Fixture entry point: runs one helper so the file is executable on its own.
def main():
    data = [1, 2, 3, 4, 5]
    avg = calculate_average(data)
    print(f"Average: {avg}")
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,60 @@
// This is a Rust file with various TODO/FIXME comments
fn calculate_sum(a: i32, b: i32) -> i32 {
// TODO: Implement proper error handling
a + b
}
fn process_data(data: &[u8]) -> Result<(), String> {
// FIXME: This function has a bug with empty data
if data.is_empty() {
return Err("No data provided".to_string());
}
Ok(())
}
fn complex_function(x: i32) -> i32 {
// HACK: This is a workaround for a dependency issue
// TODO: Refactor this when the library is updated
x * 2 + 1
}
fn another_function() {
// NOTE: This function is deprecated
// TODO: Remove in next version
println!("This is deprecated");
}
struct User {
id: u32,
name: String,
}
impl User {
fn new(id: u32, name: String) -> Self {
// FIXME: Validation is missing here!
Self { id, name }
}
}
// XXX: This is a critical issue that needs immediate attention
fn critical_function() {
// BUG: Memory leak detected here
let _data = vec![1, 2, 3];
}
// TODO: Implement unit tests for this module
// TODO: Add documentation comments
fn temp_impl() {
// TEMP: Quick fix for release
println!("temp");
}
fn refactor_needed() {
// REFACTOR: This code is hard to maintain
let x = 1;
let y = 2;
let z = 3;
println!("{} {} {}", x, y, z);
}

View File

@@ -0,0 +1,113 @@
use std::path::PathBuf;
use techdebt_tracker_cli::core::language::{Language, LanguageParser};
use techdebt_tracker_cli::models::{Config, PatternConfig, Priority};
#[test]
fn test_language_from_path_js() {
    // `.js` maps to JavaScript.
    let detected = Language::from_path(&PathBuf::from("test.js"));
    assert_eq!(detected, Some(Language::JavaScript));
}
#[test]
fn test_language_from_path_ts() {
    // `.ts` maps to TypeScript.
    let detected = Language::from_path(&PathBuf::from("test.ts"));
    assert_eq!(detected, Some(Language::TypeScript));
}
#[test]
fn test_language_from_path_py() {
    // `.py` maps to Python.
    let detected = Language::from_path(&PathBuf::from("test.py"));
    assert_eq!(detected, Some(Language::Python));
}
#[test]
fn test_language_from_path_rs() {
    // `.rs` maps to Rust.
    let detected = Language::from_path(&PathBuf::from("test.rs"));
    assert_eq!(detected, Some(Language::Rust));
}
#[test]
fn test_language_from_path_unknown() {
    // An unrecognized extension yields no language at all.
    let detected = Language::from_path(&PathBuf::from("test.xyz"));
    assert_eq!(detected, None);
}
#[test]
fn test_language_single_line_comment() {
    // `//` for the C-family syntaxes, `#` for Python, nothing for unknown.
    assert_eq!(Language::Rust.single_line_comment(), Some("//"));
    assert_eq!(Language::JavaScript.single_line_comment(), Some("//"));
    assert_eq!(Language::Python.single_line_comment(), Some("#"));
    assert_eq!(Language::Unknown.single_line_comment(), None);
}
#[test]
fn test_language_as_str() {
    // Display names are the capitalized language names.
    assert_eq!(Language::Rust.as_str(), "Rust");
    assert_eq!(Language::Python.as_str(), "Python");
    assert_eq!(Language::JavaScript.as_str(), "JavaScript");
}
#[test]
// A `// TODO:` line inside a JS function body must produce at least one item
// with the TODO keyword. The raw-string fixture must stay byte-identical:
// its indentation and text are part of what the parser sees.
fn test_javascript_comment_parsing() {
    let content = r#"
function test() {
    // TODO: Implement this
    return true;
}
"#;
    let parser = LanguageParser::new(Language::JavaScript);
    let patterns = vec![PatternConfig {
        keyword: "TODO".to_string(),
        priority: Priority::Medium,
        regex: false,
    }];
    let items = parser.parse(content, &PathBuf::from("test.js"), &patterns).unwrap();
    assert!(!items.is_empty());
    assert!(items.iter().any(|i| i.keyword == "TODO"));
}
#[test]
// A `# FIXME:` line inside a Python function must produce at least one item
// with the FIXME keyword. The raw-string fixture must stay byte-identical.
fn test_python_comment_parsing() {
    let content = r#"
def test():
    # FIXME: This is broken
    return True
"#;
    let parser = LanguageParser::new(Language::Python);
    let patterns = vec![PatternConfig {
        keyword: "FIXME".to_string(),
        priority: Priority::Critical,
        regex: false,
    }];
    let items = parser.parse(content, &PathBuf::from("test.py"), &patterns).unwrap();
    assert!(!items.is_empty());
    assert!(items.iter().any(|i| i.keyword == "FIXME"));
}
#[test]
// A TODO inside a `/* ... */` block must also be detected.
// The raw-string fixture must stay byte-identical.
fn test_multiline_comment_parsing() {
    let content = r#"
/*
 * TODO: Multi-line comment
 * Need to fix this later
 */
"#;
    let parser = LanguageParser::new(Language::JavaScript);
    let patterns = vec![PatternConfig {
        keyword: "TODO".to_string(),
        priority: Priority::Medium,
        regex: false,
    }];
    let items = parser.parse(content, &PathBuf::from("test.js"), &patterns).unwrap();
    assert!(!items.is_empty());
}