mirror of https://github.com/kjanat/articulate-parser.git
synced 2026-01-16 09:42:09 +01:00
chore: Improve code quality and address linter feedback
This commit introduces several improvements across the codebase, primarily focused on enhancing performance, robustness, and developer experience based on static analysis feedback.

- Replaces `WriteString(fmt.Sprintf())` with the more performant `fmt.Fprintf` in the HTML and Markdown exporters.
- Enhances deferred `Close()` operations to log warnings on failure instead of silently ignoring potential I/O issues.
- Explicitly discards non-critical errors in test suites, particularly during file cleanup, to satisfy linters and clarify intent.
- Suppresses command echoing in `Taskfile.yml` for cleaner output during development tasks.
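For context, a minimal, self-contained sketch of the two main patterns this commit applies is shown below. The file name (example.html) and helper names (writeHeading, writeToFile) are illustrative only and do not appear in the repository.

// Illustrative sketch only: demonstrates fmt.Fprintf writing directly into a
// bytes.Buffer (no intermediate fmt.Sprintf allocation) and a deferred Close
// that logs a warning instead of silently dropping the error.
package main

import (
    "bytes"
    "fmt"
    "os"
)

func writeHeading(buf *bytes.Buffer, heading string) {
    // Writes formatted output straight into the buffer.
    fmt.Fprintf(buf, "  <h5>%s</h5>\n", heading)
}

func writeToFile(path string, buf *bytes.Buffer) error {
    file, err := os.Create(path)
    if err != nil {
        return fmt.Errorf("failed to create output file: %w", err)
    }
    // By the time Close runs, the buffer has already been written, so a Close
    // failure is reported as a warning rather than treated as fatal.
    defer func() {
        if cerr := file.Close(); cerr != nil {
            fmt.Fprintf(os.Stderr, "warning: failed to close output file: %v\n", cerr)
        }
    }()
    _, err = buf.WriteTo(file)
    return err
}

func main() {
    var buf bytes.Buffer
    writeHeading(&buf, "Example")
    if err := writeToFile("example.html", &buf); err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
}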
2  .gitignore (vendored)
@@ -71,3 +71,5 @@ main_coverage
.idea/
.task/
**/*.local.*
10  Taskfile.yml
@@ -180,6 +180,7 @@ tasks:
# Code quality tasks
lint:
desc: Run all linters
silent: true
aliases: [l]
cmds:
- task: lint:vet

@@ -189,11 +190,13 @@ tasks:

lint:vet:
desc: Run go vet
silent: true
cmds:
- go vet ./...

lint:fmt:
desc: Check code formatting
silent: true
vars:
UNFORMATTED:
sh: gofmt -s -l .

@@ -209,6 +212,7 @@ tasks:

lint:staticcheck:
desc: Run staticcheck (install if needed)
silent: true
vars:
HAS_STATICCHECK:
sh: '{{if eq OS "windows"}}where.exe staticcheck 2>NUL{{else}}command -v staticcheck 2>/dev/null{{end}}'

@@ -219,6 +223,7 @@ tasks:

lint:golangci:
desc: Run golangci-lint (install if needed)
silent: true
aliases: [golangci, golangci-lint]
vars:
HAS_GOLANGCI:

@@ -230,6 +235,7 @@ tasks:

lint:golangci:fix:
desc: Run golangci-lint with auto-fix
silent: true
aliases: [golangci-fix]
vars:
HAS_GOLANGCI:

@@ -237,10 +243,11 @@ tasks:
cmds:
- '{{if eq .HAS_GOLANGCI ""}}echo "Installing golangci-lint..." && go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest{{end}}'
- golangci-lint run --fix ./...
- echo "✅ golangci-lint fixes applied"
- echo "golangci-lint fixes applied"

fmt:
desc: Format all Go files
silent: true
aliases: [format]
cmds:
- gofmt -s -w .

@@ -248,6 +255,7 @@ tasks:

modernize:
desc: Modernize Go code to use modern idioms
silent: true
aliases: [modern]
cmds:
- go run golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@latest -fix -test ./...
@@ -76,7 +76,15 @@ func (e *DocxExporter) Export(course *models.Course, outputPath string) error {
if err != nil {
return fmt.Errorf("failed to create output file: %w", err)
}
defer file.Close()
// Ensure file is closed even if WriteTo fails. Close errors are logged but not
// fatal since the document content has already been written to disk. A close
// error typically indicates a filesystem synchronization issue that doesn't
// affect the validity of the exported file.
defer func() {
if err := file.Close(); err != nil {
fmt.Fprintf(os.Stderr, "warning: failed to close output file: %v\n", err)
}
}()

// Save the document
_, err = doc.WriteTo(file)
@@ -618,8 +618,10 @@ func BenchmarkDocxExporter_Export(b *testing.B) {
for b.Loop() {
outputPath := filepath.Join(tempDir, "benchmark-course.docx")
_ = exporter.Export(course, outputPath)
// Clean up for next iteration
os.Remove(outputPath)
// Clean up for next iteration. Remove errors are ignored because we've already
// benchmarked the export operation; cleanup failures don't affect the benchmark
// measurements or the validity of the next iteration's export.
_ = os.Remove(outputPath)
}
}

@@ -672,6 +674,8 @@ func BenchmarkDocxExporter_ComplexCourse(b *testing.B) {
for b.Loop() {
outputPath := filepath.Join(tempDir, "benchmark-complex.docx")
_ = exporter.Export(course, outputPath)
os.Remove(outputPath)
// Remove errors are ignored because we're only benchmarking the export
// operation itself; cleanup failures don't affect the benchmark metrics.
_ = os.Remove(outputPath)
}
}
@@ -335,10 +335,10 @@ func (e *HTMLExporter) processTextItem(buf *bytes.Buffer, item models.Item) {
buf.WriteString(" <h4>Text Content</h4>\n")
for _, subItem := range item.Items {
if subItem.Heading != "" {
buf.WriteString(fmt.Sprintf(" <h5>%s</h5>\n", subItem.Heading))
fmt.Fprintf(buf, " <h5>%s</h5>\n", subItem.Heading)
}
if subItem.Paragraph != "" {
buf.WriteString(fmt.Sprintf(" <div>%s</div>\n", subItem.Paragraph))
fmt.Fprintf(buf, " <div>%s</div>\n", subItem.Paragraph)
}
}
buf.WriteString(" </div>\n\n")

@@ -352,7 +352,7 @@ func (e *HTMLExporter) processListItem(buf *bytes.Buffer, item models.Item) {
for _, subItem := range item.Items {
if subItem.Paragraph != "" {
cleanText := e.htmlCleaner.CleanHTML(subItem.Paragraph)
buf.WriteString(fmt.Sprintf(" <li>%s</li>\n", html.EscapeString(cleanText)))
fmt.Fprintf(buf, " <li>%s</li>\n", html.EscapeString(cleanText))
}
}
buf.WriteString(" </ul>\n")

@@ -365,13 +365,13 @@ func (e *HTMLExporter) processKnowledgeCheckItem(buf *bytes.Buffer, item models.
buf.WriteString(" <h4>Knowledge Check</h4>\n")
for _, subItem := range item.Items {
if subItem.Title != "" {
buf.WriteString(fmt.Sprintf(" <p><strong>Question:</strong> %s</p>\n", subItem.Title))
fmt.Fprintf(buf, " <p><strong>Question:</strong> %s</p>\n", subItem.Title)
}
if len(subItem.Answers) > 0 {
e.processAnswers(buf, subItem.Answers)
}
if subItem.Feedback != "" {
buf.WriteString(fmt.Sprintf(" <div class=\"feedback\"><strong>Feedback:</strong> %s</div>\n", subItem.Feedback))
fmt.Fprintf(buf, " <div class=\"feedback\"><strong>Feedback:</strong> %s</div>\n", subItem.Feedback)
}
}
buf.WriteString(" </div>\n\n")
@@ -383,20 +383,20 @@ func (e *HTMLExporter) processMultimediaItem(buf *bytes.Buffer, item models.Item
buf.WriteString(" <h4>Media Content</h4>\n")
for _, subItem := range item.Items {
if subItem.Title != "" {
buf.WriteString(fmt.Sprintf(" <h5>%s</h5>\n", subItem.Title))
fmt.Fprintf(buf, " <h5>%s</h5>\n", subItem.Title)
}
if subItem.Media != nil {
if subItem.Media.Video != nil {
buf.WriteString(" <div class=\"media-info\">\n")
buf.WriteString(fmt.Sprintf(" <p><strong>Video:</strong> %s</p>\n", html.EscapeString(subItem.Media.Video.OriginalUrl)))
fmt.Fprintf(buf, " <p><strong>Video:</strong> %s</p>\n", html.EscapeString(subItem.Media.Video.OriginalUrl))
if subItem.Media.Video.Duration > 0 {
buf.WriteString(fmt.Sprintf(" <p><strong>Duration:</strong> %d seconds</p>\n", subItem.Media.Video.Duration))
fmt.Fprintf(buf, " <p><strong>Duration:</strong> %d seconds</p>\n", subItem.Media.Video.Duration)
}
buf.WriteString(" </div>\n")
}
}
if subItem.Caption != "" {
buf.WriteString(fmt.Sprintf(" <div><em>%s</em></div>\n", subItem.Caption))
fmt.Fprintf(buf, " <div><em>%s</em></div>\n", subItem.Caption)
}
}
buf.WriteString(" </div>\n\n")

@@ -409,11 +409,11 @@ func (e *HTMLExporter) processImageItem(buf *bytes.Buffer, item models.Item) {
for _, subItem := range item.Items {
if subItem.Media != nil && subItem.Media.Image != nil {
buf.WriteString(" <div class=\"media-info\">\n")
buf.WriteString(fmt.Sprintf(" <p><strong>Image:</strong> %s</p>\n", html.EscapeString(subItem.Media.Image.OriginalUrl)))
fmt.Fprintf(buf, " <p><strong>Image:</strong> %s</p>\n", html.EscapeString(subItem.Media.Image.OriginalUrl))
buf.WriteString(" </div>\n")
}
if subItem.Caption != "" {
buf.WriteString(fmt.Sprintf(" <div><em>%s</em></div>\n", subItem.Caption))
fmt.Fprintf(buf, " <div><em>%s</em></div>\n", subItem.Caption)
}
}
buf.WriteString(" </div>\n\n")

@@ -425,10 +425,10 @@ func (e *HTMLExporter) processInteractiveItem(buf *bytes.Buffer, item models.Ite
buf.WriteString(" <h4>Interactive Content</h4>\n")
for _, subItem := range item.Items {
if subItem.Title != "" {
buf.WriteString(fmt.Sprintf(" <p><strong>%s</strong></p>\n", subItem.Title))
fmt.Fprintf(buf, " <p><strong>%s</strong></p>\n", subItem.Title)
}
if subItem.Paragraph != "" {
buf.WriteString(fmt.Sprintf(" <div>%s</div>\n", subItem.Paragraph))
fmt.Fprintf(buf, " <div>%s</div>\n", subItem.Paragraph)
}
}
buf.WriteString(" </div>\n\n")
@@ -444,7 +444,7 @@ func (e *HTMLExporter) processUnknownItem(buf *bytes.Buffer, item models.Item) {
if len(item.Items) > 0 {
buf.WriteString(" <div class=\"item unknown-item\">\n")
caser := cases.Title(language.English)
buf.WriteString(fmt.Sprintf(" <h4>%s Content</h4>\n", caser.String(item.Type)))
fmt.Fprintf(buf, " <h4>%s Content</h4>\n", caser.String(item.Type))
for _, subItem := range item.Items {
e.processGenericSubItem(buf, subItem)
}

@@ -455,10 +455,10 @@ func (e *HTMLExporter) processUnknownItem(buf *bytes.Buffer, item models.Item) {
// processGenericSubItem processes sub-items for unknown types
func (e *HTMLExporter) processGenericSubItem(buf *bytes.Buffer, subItem models.SubItem) {
if subItem.Title != "" {
buf.WriteString(fmt.Sprintf(" <p><strong>%s</strong></p>\n", subItem.Title))
fmt.Fprintf(buf, " <p><strong>%s</strong></p>\n", subItem.Title)
}
if subItem.Paragraph != "" {
buf.WriteString(fmt.Sprintf(" <div>%s</div>\n", subItem.Paragraph))
fmt.Fprintf(buf, " <div>%s</div>\n", subItem.Paragraph)
}
}

@@ -472,7 +472,7 @@ func (e *HTMLExporter) processAnswers(buf *bytes.Buffer, answers []models.Answer
if answer.Correct {
cssClass = " class=\"correct-answer\""
}
buf.WriteString(fmt.Sprintf(" <li%s>%s</li>\n", cssClass, html.EscapeString(answer.Title)))
fmt.Fprintf(buf, " <li%s>%s</li>\n", cssClass, html.EscapeString(answer.Title))
}
buf.WriteString(" </ol>\n")
buf.WriteString(" </div>\n")
@@ -844,8 +844,10 @@ func BenchmarkHTMLExporter_Export(b *testing.B) {
for b.Loop() {
outputPath := filepath.Join(tempDir, "benchmark-course.html")
_ = exporter.Export(course, outputPath)
// Clean up for next iteration
os.Remove(outputPath)
// Clean up for next iteration. Remove errors are ignored because we've already
// benchmarked the export operation; cleanup failures don't affect the benchmark
// measurements or the validity of the next iteration's export.
_ = os.Remove(outputPath)
}
}

@@ -919,6 +921,8 @@ func BenchmarkHTMLExporter_ComplexCourse(b *testing.B) {
for b.Loop() {
outputPath := filepath.Join(tempDir, "benchmark-complex.html")
_ = exporter.Export(course, outputPath)
os.Remove(outputPath)
// Remove errors are ignored because we're only benchmarking the export
// operation itself; cleanup failures don't affect the benchmark metrics.
_ = os.Remove(outputPath)
}
}
@@ -138,13 +138,13 @@ func (e *MarkdownExporter) processTextItem(buf *bytes.Buffer, item models.Item,
if subItem.Heading != "" {
heading := e.htmlCleaner.CleanHTML(subItem.Heading)
if heading != "" {
buf.WriteString(fmt.Sprintf("%s %s\n\n", headingPrefix, heading))
fmt.Fprintf(buf, "%s %s\n\n", headingPrefix, heading)
}
}
if subItem.Paragraph != "" {
paragraph := e.htmlCleaner.CleanHTML(subItem.Paragraph)
if paragraph != "" {
buf.WriteString(fmt.Sprintf("%s\n\n", paragraph))
fmt.Fprintf(buf, "%s\n\n", paragraph)
}
}
}

@@ -156,7 +156,7 @@ func (e *MarkdownExporter) processListItem(buf *bytes.Buffer, item models.Item)
if subItem.Paragraph != "" {
paragraph := e.htmlCleaner.CleanHTML(subItem.Paragraph)
if paragraph != "" {
buf.WriteString(fmt.Sprintf("- %s\n", paragraph))
fmt.Fprintf(buf, "- %s\n", paragraph)
}
}
}

@@ -165,7 +165,7 @@ func (e *MarkdownExporter) processListItem(buf *bytes.Buffer, item models.Item)

// processMultimediaItem handles multimedia content including videos and images
func (e *MarkdownExporter) processMultimediaItem(buf *bytes.Buffer, item models.Item, headingPrefix string) {
buf.WriteString(fmt.Sprintf("%s Media Content\n\n", headingPrefix))
fmt.Fprintf(buf, "%s Media Content\n\n", headingPrefix)
for _, subItem := range item.Items {
e.processMediaSubItem(buf, subItem)
}
@@ -180,16 +180,16 @@ func (e *MarkdownExporter) processMediaSubItem(buf *bytes.Buffer, subItem models
}
if subItem.Caption != "" {
caption := e.htmlCleaner.CleanHTML(subItem.Caption)
buf.WriteString(fmt.Sprintf("*%s*\n", caption))
fmt.Fprintf(buf, "*%s*\n", caption)
}
}

// processVideoMedia processes video media content
func (e *MarkdownExporter) processVideoMedia(buf *bytes.Buffer, media *models.Media) {
if media.Video != nil {
buf.WriteString(fmt.Sprintf("**Video**: %s\n", media.Video.OriginalUrl))
fmt.Fprintf(buf, "**Video**: %s\n", media.Video.OriginalUrl)
if media.Video.Duration > 0 {
buf.WriteString(fmt.Sprintf("**Duration**: %d seconds\n", media.Video.Duration))
fmt.Fprintf(buf, "**Duration**: %d seconds\n", media.Video.Duration)
}
}
}

@@ -197,20 +197,20 @@ func (e *MarkdownExporter) processVideoMedia(buf *bytes.Buffer, media *models.Me
// processImageMedia processes image media content
func (e *MarkdownExporter) processImageMedia(buf *bytes.Buffer, media *models.Media) {
if media.Image != nil {
buf.WriteString(fmt.Sprintf("**Image**: %s\n", media.Image.OriginalUrl))
fmt.Fprintf(buf, "**Image**: %s\n", media.Image.OriginalUrl)
}
}

// processImageItem handles standalone image items
func (e *MarkdownExporter) processImageItem(buf *bytes.Buffer, item models.Item, headingPrefix string) {
buf.WriteString(fmt.Sprintf("%s Image\n\n", headingPrefix))
fmt.Fprintf(buf, "%s Image\n\n", headingPrefix)
for _, subItem := range item.Items {
if subItem.Media != nil && subItem.Media.Image != nil {
buf.WriteString(fmt.Sprintf("**Image**: %s\n", subItem.Media.Image.OriginalUrl))
fmt.Fprintf(buf, "**Image**: %s\n", subItem.Media.Image.OriginalUrl)
}
if subItem.Caption != "" {
caption := e.htmlCleaner.CleanHTML(subItem.Caption)
buf.WriteString(fmt.Sprintf("*%s*\n", caption))
fmt.Fprintf(buf, "*%s*\n", caption)
}
}
buf.WriteString("\n")
@@ -218,7 +218,7 @@ func (e *MarkdownExporter) processImageItem(buf *bytes.Buffer, item models.Item,

// processKnowledgeCheckItem handles quiz questions and knowledge checks
func (e *MarkdownExporter) processKnowledgeCheckItem(buf *bytes.Buffer, item models.Item, headingPrefix string) {
buf.WriteString(fmt.Sprintf("%s Knowledge Check\n\n", headingPrefix))
fmt.Fprintf(buf, "%s Knowledge Check\n\n", headingPrefix)
for _, subItem := range item.Items {
e.processQuestionSubItem(buf, subItem)
}

@@ -229,14 +229,14 @@ func (e *MarkdownExporter) processKnowledgeCheckItem(buf *bytes.Buffer, item mod
func (e *MarkdownExporter) processQuestionSubItem(buf *bytes.Buffer, subItem models.SubItem) {
if subItem.Title != "" {
title := e.htmlCleaner.CleanHTML(subItem.Title)
buf.WriteString(fmt.Sprintf("**Question**: %s\n\n", title))
fmt.Fprintf(buf, "**Question**: %s\n\n", title)
}

e.processAnswers(buf, subItem.Answers)

if subItem.Feedback != "" {
feedback := e.htmlCleaner.CleanHTML(subItem.Feedback)
buf.WriteString(fmt.Sprintf("\n**Feedback**: %s\n", feedback))
fmt.Fprintf(buf, "\n**Feedback**: %s\n", feedback)
}
}

@@ -248,17 +248,17 @@ func (e *MarkdownExporter) processAnswers(buf *bytes.Buffer, answers []models.An
if answer.Correct {
correctMark = " ✓"
}
buf.WriteString(fmt.Sprintf("%d. %s%s\n", i+1, answer.Title, correctMark))
fmt.Fprintf(buf, "%d. %s%s\n", i+1, answer.Title, correctMark)
}
}

// processInteractiveItem handles interactive content
func (e *MarkdownExporter) processInteractiveItem(buf *bytes.Buffer, item models.Item, headingPrefix string) {
buf.WriteString(fmt.Sprintf("%s Interactive Content\n\n", headingPrefix))
fmt.Fprintf(buf, "%s Interactive Content\n\n", headingPrefix)
for _, subItem := range item.Items {
if subItem.Title != "" {
title := e.htmlCleaner.CleanHTML(subItem.Title)
buf.WriteString(fmt.Sprintf("**%s**\n\n", title))
fmt.Fprintf(buf, "**%s**\n\n", title)
}
}
}
@@ -272,7 +272,7 @@ func (e *MarkdownExporter) processDividerItem(buf *bytes.Buffer) {
func (e *MarkdownExporter) processUnknownItem(buf *bytes.Buffer, item models.Item, headingPrefix string) {
if len(item.Items) > 0 {
caser := cases.Title(language.English)
buf.WriteString(fmt.Sprintf("%s %s Content\n\n", headingPrefix, caser.String(item.Type)))
fmt.Fprintf(buf, "%s %s Content\n\n", headingPrefix, caser.String(item.Type))
for _, subItem := range item.Items {
e.processGenericSubItem(buf, subItem)
}

@@ -283,10 +283,10 @@ func (e *MarkdownExporter) processUnknownItem(buf *bytes.Buffer, item models.Ite
func (e *MarkdownExporter) processGenericSubItem(buf *bytes.Buffer, subItem models.SubItem) {
if subItem.Title != "" {
title := e.htmlCleaner.CleanHTML(subItem.Title)
buf.WriteString(fmt.Sprintf("**%s**\n\n", title))
fmt.Fprintf(buf, "**%s**\n\n", title)
}
if subItem.Paragraph != "" {
paragraph := e.htmlCleaner.CleanHTML(subItem.Paragraph)
buf.WriteString(fmt.Sprintf("%s\n\n", paragraph))
fmt.Fprintf(buf, "%s\n\n", paragraph)
}
}

@@ -664,8 +664,10 @@ func BenchmarkMarkdownExporter_Export(b *testing.B) {
for b.Loop() {
outputPath := filepath.Join(tempDir, "benchmark-course.md")
_ = exporter.Export(course, outputPath)
// Clean up for next iteration
os.Remove(outputPath)
// Clean up for next iteration. Remove errors are ignored because we've already
// benchmarked the export operation; cleanup failures don't affect the benchmark
// measurements or the validity of the next iteration's export.
_ = os.Remove(outputPath)
}
}
@@ -60,7 +60,15 @@ func (p *ArticulateParser) FetchCourse(uri string) (*models.Course, error) {
if err != nil {
return nil, fmt.Errorf("failed to fetch course data: %w", err)
}
defer resp.Body.Close()
// Ensure response body is closed even if ReadAll fails. Close errors are logged
// but not fatal since the body content has already been read and parsed. In the
// context of HTTP responses, the body must be closed to release the underlying
// connection, but a close error doesn't invalidate the data already consumed.
defer func() {
if err := resp.Body.Close(); err != nil {
fmt.Fprintf(os.Stderr, "warning: failed to close response body: %v\n", err)
}
}()

if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("API returned status %d", resp.StatusCode)

@@ -161,7 +161,10 @@ func TestArticulateParser_FetchCourse_InvalidJSON(t *testing.T) {
// Create test server that returns invalid JSON
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.Write([]byte("invalid json"))
// Write is used for its side effect; the test verifies error handling on
// the client side, not whether the write succeeds. Ignore the error since
// httptest.ResponseWriter writes are rarely problematic in test contexts.
_, _ = w.Write([]byte("invalid json"))
}))
defer server.Close()
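The "defer close, log a warning" pattern above now appears in both the DOCX exporter and the parser. A possible follow-up, not part of this commit, would be to factor it into a small helper; the sketch below assumes a hypothetical closeWithWarning name and an example.txt path that do not exist in the repository.

// Hypothetical consolidation sketch only (not part of this commit).
package main

import (
    "fmt"
    "io"
    "os"
)

// closeWithWarning closes c and logs a warning to stderr if Close fails,
// mirroring the behavior introduced in this commit.
func closeWithWarning(c io.Closer, what string) {
    if err := c.Close(); err != nil {
        fmt.Fprintf(os.Stderr, "warning: failed to close %s: %v\n", what, err)
    }
}

func main() {
    f, err := os.Create("example.txt")
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        return
    }
    defer closeWithWarning(f, "output file")

    if _, err := f.WriteString("hello\n"); err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
}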
125  main_test.go
@@ -124,13 +124,16 @@ func TestRunWithInsufficientArgs(t *testing.T) {
// Run the function
exitCode := run(tt.args)

// Restore stdout
w.Close()
// Restore stdout. Close errors are ignored: we've already captured the
// output before closing, and any close error doesn't affect test validity.
_ = w.Close()
os.Stdout = oldStdout

// Read captured output
// Read captured output. Copy errors are ignored: in this test context,
// reading from a pipe that was just closed is not expected to fail, and
// we're verifying the captured output regardless.
var buf bytes.Buffer
io.Copy(&buf, r)
_, _ = io.Copy(&buf, r)
output := buf.String()

// Verify exit code

@@ -165,13 +168,15 @@ func TestRunWithHelpFlags(t *testing.T) {
args := []string{"articulate-parser", flag}
exitCode := run(args)

// Restore stdout
w.Close()
// Restore stdout. Close errors are ignored: the pipe write end is already
// closed before reading, and any close error doesn't affect the test.
_ = w.Close()
os.Stdout = oldStdout

// Read captured output
// Read captured output. Copy errors are ignored: we successfully wrote
// the help output to the pipe and can verify it regardless of close semantics.
var buf bytes.Buffer
io.Copy(&buf, r)
_, _ = io.Copy(&buf, r)
output := buf.String()

// Verify exit code is 0 (success)

@@ -214,13 +219,15 @@ func TestRunWithVersionFlags(t *testing.T) {
args := []string{"articulate-parser", flag}
exitCode := run(args)

// Restore stdout
w.Close()
// Restore stdout. Close errors are ignored: the version output has already
// been written and we're about to read it; close semantics don't affect correctness.
_ = w.Close()
os.Stdout = oldStdout

// Read captured output
// Read captured output. Copy errors are ignored: the output was successfully
// produced and we can verify its contents regardless of any I/O edge cases.
var buf bytes.Buffer
io.Copy(&buf, r)
_, _ = io.Copy(&buf, r)
output := buf.String()

// Verify exit code is 0 (success)
@@ -264,20 +271,26 @@ func TestRunWithInvalidFile(t *testing.T) {
args := []string{"articulate-parser", "nonexistent-file.json", "markdown", "output.md"}
exitCode := run(args)

// Restore stdout/stderr and log output
stdoutW.Close()
stderrW.Close()
// Restore stdout/stderr and log output. Close errors are ignored: we've already
// written all error messages to these pipes before closing them, and the test
// only cares about verifying the captured output.
_ = stdoutW.Close()
_ = stderrW.Close()
os.Stdout = oldStdout
os.Stderr = oldStderr
log.SetOutput(oldLogOutput)

// Read captured output
// Read captured output. Copy errors are ignored: the error messages have been
// successfully written to the pipes, and we can verify the output content
// regardless of any edge cases in pipe closure or I/O completion.
var stdoutBuf, stderrBuf bytes.Buffer
io.Copy(&stdoutBuf, stdoutR)
io.Copy(&stderrBuf, stderrR)
_, _ = io.Copy(&stdoutBuf, stdoutR)
_, _ = io.Copy(&stderrBuf, stderrR)

stdoutR.Close()
stderrR.Close()
// Close read ends of pipes. Errors ignored: we've already consumed all data
// from these pipes, and close errors don't affect test assertions.
_ = stdoutR.Close()
_ = stderrR.Close()

// Verify exit code
if exitCode != 1 {

@@ -311,20 +324,26 @@ func TestRunWithInvalidURI(t *testing.T) {
args := []string{"articulate-parser", "https://example.com/invalid", "markdown", "output.md"}
exitCode := run(args)

// Restore stdout/stderr and log output
stdoutW.Close()
stderrW.Close()
// Restore stdout/stderr and log output. Close errors are ignored: we've already
// written all error messages about the invalid URI to these pipes before closing,
// and test correctness only depends on verifying the captured error output.
_ = stdoutW.Close()
_ = stderrW.Close()
os.Stdout = oldStdout
os.Stderr = oldStderr
log.SetOutput(oldLogOutput)

// Read captured output
// Read captured output. Copy errors are ignored: the error messages have been
// successfully written and we can verify the failure output content regardless
// of any edge cases in pipe lifecycle or I/O synchronization.
var stdoutBuf, stderrBuf bytes.Buffer
io.Copy(&stdoutBuf, stdoutR)
io.Copy(&stderrBuf, stderrR)
_, _ = io.Copy(&stdoutBuf, stdoutR)
_, _ = io.Copy(&stderrBuf, stderrR)

stdoutR.Close()
stderrR.Close()
// Close read ends of pipes. Errors ignored: we've already consumed all data
// and close errors don't affect the validation of the error output.
_ = stdoutR.Close()
_ = stderrR.Close()

// Should fail because the URI is invalid/unreachable
if exitCode != 1 {
@@ -364,16 +383,29 @@ func TestRunWithValidJSONFile(t *testing.T) {
if err != nil {
t.Fatalf("Failed to create temp file: %v", err)
}
defer os.Remove(tmpFile.Name())
// Ensure temporary test file is cleaned up. Remove errors are ignored because
// the test has already used the file for its purpose, and cleanup failures don't
// invalidate the test results (the OS will eventually clean up temp files).
defer func() {
_ = os.Remove(tmpFile.Name())
}()

if _, err := tmpFile.WriteString(testContent); err != nil {
t.Fatalf("Failed to write test content: %v", err)
}
tmpFile.Close()
// Close the temporary file. Errors are ignored because we've already written
// the test content and the main test logic (loading the file) doesn't depend
// on the success of closing this file descriptor.
_ = tmpFile.Close()

// Test successful run with valid file
outputFile := "test-output.md"
defer os.Remove(outputFile)
// Ensure test output file is cleaned up. Remove errors are ignored because the
// test has already verified the export succeeded; cleanup failures don't affect
// the test assertions.
defer func() {
_ = os.Remove(outputFile)
}()

// Save original stdout
originalStdout := os.Stdout

@@ -386,13 +418,17 @@ func TestRunWithValidJSONFile(t *testing.T) {
args := []string{"articulate-parser", tmpFile.Name(), "markdown", outputFile}
exitCode := run(args)

// Close write end and restore stdout
w.Close()
// Close write end and restore stdout. Close errors are ignored: we've already
// written the success message before closing, and any close error doesn't affect
// the validity of the captured output or the test assertions.
_ = w.Close()
os.Stdout = originalStdout

// Read captured output
// Read captured output. Copy errors are ignored: the success message was
// successfully written to the pipe, and we can verify it regardless of any
// edge cases in pipe closure or I/O synchronization.
var buf bytes.Buffer
io.Copy(&buf, r)
_, _ = io.Copy(&buf, r)
output := buf.String()

// Verify successful execution
@@ -439,17 +475,24 @@ func TestRunIntegration(t *testing.T) {
args := []string{"articulate-parser", "articulate-sample.json", format.format, format.output}
exitCode := run(args)

// Restore stdout
w.Close()
// Restore stdout. Close errors are ignored: the export success message
// has already been written and we're about to read it; close semantics
// don't affect the validity of the captured output.
_ = w.Close()
os.Stdout = oldStdout

// Read captured output
// Read captured output. Copy errors are ignored: the output was successfully
// produced and we can verify its contents regardless of any I/O edge cases.
var buf bytes.Buffer
io.Copy(&buf, r)
_, _ = io.Copy(&buf, r)
output := buf.String()

// Clean up test file
defer os.Remove(format.output)
// Clean up test file. Remove errors are ignored because the test has
// already verified the export succeeded; cleanup failures don't affect
// the test assertions.
defer func() {
_ = os.Remove(format.output)
}()

// Verify successful execution
if exitCode != 0 {