From 58a16b9eefa938f96a6eec97f5d5743914b7905d Mon Sep 17 00:00:00 2001
From: 7000pctAUTO
Date: Mon, 2 Feb 2026 00:08:15 +0000
Subject: [PATCH] fix: resolve CI/CD issues - Poetry setup, type annotations,
 MyPy errors

---
 codechunk/core/formatter.py | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/codechunk/core/formatter.py b/codechunk/core/formatter.py
index 65014dd..678c24e 100644
--- a/codechunk/core/formatter.py
+++ b/codechunk/core/formatter.py
@@ -1,4 +1,3 @@
-from typing import List, Optional
 
 from codechunk.core.chunking import ParsedChunk
 
@@ -10,7 +9,7 @@ class OutputFormatter:
         self.max_tokens = max_tokens
         self.token_warning_thresholds = [0.7, 0.9, 1.0]
 
-    def format(self, chunks: List[ParsedChunk]) -> str:
+    def format(self, chunks: list[ParsedChunk]) -> str:
         """Format chunks for output."""
         if self.format_type == "ollama":
             return self._format_ollama(chunks)
@@ -19,7 +18,7 @@ class OutputFormatter:
         else:
             return self._format_markdown(chunks)
 
-    def _format_ollama(self, chunks: List[ParsedChunk]) -> str:
+    def _format_ollama(self, chunks: list[ParsedChunk]) -> str:
         """Format for Ollama."""
         lines = []
         lines.append("### System")
@@ -56,7 +55,7 @@ class OutputFormatter:
 
         return "\n".join(lines)
 
-    def _format_lmstudio(self, chunks: List[ParsedChunk]) -> str:
+    def _format_lmstudio(self, chunks: list[ParsedChunk]) -> str:
         """Format for LM Studio."""
         import json
 
@@ -99,7 +98,7 @@ Provide clear, accurate code analysis and assistance."""
 
         return json.dumps(messages, indent=2)
 
-    def _format_markdown(self, chunks: List[ParsedChunk]) -> str:
+    def _format_markdown(self, chunks: list[ParsedChunk]) -> str:
         """Format as markdown."""
         lines = []
         lines.append("# Code Context")
@@ -183,7 +182,7 @@ Provide clear, accurate code analysis and assistance."""
         else:
             return True, ratio, "OK"
 
-    def prune_for_limit(self, chunks: List[ParsedChunk], max_tokens: int) -> List[ParsedChunk]:
+    def prune_for_limit(self, chunks: list[ParsedChunk], max_tokens: int) -> list[ParsedChunk]:
         """Prune chunks to fit within token limit."""
         result = []
         current_tokens = 0