3 Commits

Author SHA1 Message Date
2e887a2b31 Update .github/workflows/release.yml
Change how BUILD_TIME is derived

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-05-29 01:13:59 +02:00
cdc9815cee Update dependency-review.yml
Added permission to add pull-request comments
2025-05-29 01:11:06 +02:00
c4086a832f fix: prevent duplicate workflow execution on tag push
- Remove tag trigger from CI workflow to prevent simultaneous execution
- Add Docker build job to release workflow for proper release handling
- CI now only runs on branch pushes and PRs
- Release workflow handles both binary releases and Docker images for tags

Fixes issue where both CI and release workflows would start when pushing vX.X.X tags
2025-05-28 23:03:19 +00:00
51 changed files with 1516 additions and 3691 deletions

View File

@@ -8,7 +8,7 @@ updates:
 day: 'monday'
 time: '07:00'
 timezone: 'Europe/Amsterdam'
-open-pull-requests-limit: 2
+open-pull-requests-limit: 10
 labels:
 - 'dependencies'
 - 'dependencies/github-actions'
@@ -24,7 +24,7 @@ updates:
 day: 'monday'
 time: '07:00'
 timezone: 'Europe/Amsterdam'
-open-pull-requests-limit: 2
+open-pull-requests-limit: 10
 labels:
 - 'dependencies'
 - 'dependencies/docker'
@@ -32,7 +32,7 @@ updates:
 prefix: 'docker'
 include: 'scope'
 groups:
-docker:
+docker-images:
 patterns:
 - '*'
 update-types:
@@ -47,7 +47,7 @@ updates:
 day: 'monday'
 time: '07:00'
 timezone: 'Europe/Amsterdam'
-open-pull-requests-limit: 2
+open-pull-requests-limit: 10
 labels:
 - 'dependencies'
 - 'dependencies/docker-compose'
@@ -55,7 +55,7 @@ updates:
 prefix: 'docker'
 include: 'scope'
 groups:
-docker:
+docker-compose:
 patterns:
 - '*'
 update-types:
@@ -70,7 +70,7 @@ updates:
 day: 'monday'
 time: '07:00'
 timezone: 'Europe/Amsterdam'
-open-pull-requests-limit: 2
+open-pull-requests-limit: 10
 labels:
 - 'dependencies'
 - 'dependencies/go'

View File

@@ -10,36 +10,16 @@ jobs:
 autofix:
 runs-on: ubuntu-latest
 steps:
-- name: Checkout code
-uses: actions/checkout@v5
-- name: Install Task
-uses: go-task/setup-task@v1
-- uses: actions/setup-go@v6
-with: { go-version-file: 'go.mod' }
-- name: Setup go deps
-run: |
-# Install golangci-lint
-curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s -- -b $(go env GOPATH)/bin
-# Install go-task dependencies
-go install golang.org/x/tools/cmd/goimports@latest
-- name: Run goimports
-run: goimports -w .
-- name: Run golangci-lint autofix
-run: golangci-lint run --fix
-- name: Run golangci-lint format
-run: golangci-lint format
-- name: Run go mod tidy
-run: go mod tidy
-- name: Run gopls modernize
-run: task modernize
-- uses: autofix-ci/action@v1
+- uses: actions/checkout@v4
+- uses: actions/setup-go@v5
+with:
+go-version-file: 'go.mod'
+# goimports works like gofmt, but also fixes imports.
+# see https://pkg.go.dev/golang.org/x/tools/cmd/goimports
+- run: go install golang.org/x/tools/cmd/goimports@latest
+- run: goimports -w .
+# of course we can also do just this instead:
+# - run: gofmt -w .
+- uses: autofix-ci/action@551dded8c6cc8a1054039c8bc0b8b48c51dfc6ef

View File

@@ -4,6 +4,7 @@ on:
 push:
 branches: ['master', 'develop']
 pull_request:
+branches: ['master', 'develop', 'feature/*']
 env:
 REGISTRY: ghcr.io
@@ -14,24 +15,8 @@
 cancel-in-progress: true
 jobs:
-golangci:
-name: lint
-runs-on: ubuntu-latest
-permissions:
-contents: read
-pull-requests: read
-steps:
-- uses: actions/checkout@v5
-- uses: actions/setup-go@v6
-with:
-go-version: stable
-- name: golangci-lint
-uses: golangci/golangci-lint-action@v8
-with: { version: latest }
 test:
 name: Test
-needs: [golangci]
 runs-on: ubuntu-latest
 permissions:
 contents: write
@@ -42,42 +27,69 @@ jobs:
 - 1.22.x
 - 1.23.x
 - 1.24.x
-- 1.25.x
 steps:
-- uses: actions/checkout@v5
+- uses: actions/checkout@v4
 - name: Set up Go ${{ matrix.go }}
-uses: actions/setup-go@v6
+uses: actions/setup-go@v5
 with:
 go-version: ${{ matrix.go }}
 check-latest: true
-- name: Install Task
-uses: go-task/setup-task@v1
-- name: Show build info
-run: task info
-- name: Download dependencies
-run: task deps
+- name: Download dependencies with retry
+run: |
+set -e
+echo "Downloading Go dependencies..."
+# Function to download with retry
+download_with_retry() {
+local attempt=1
+local max_attempts=3
+while [ $attempt -le $max_attempts ]; do
+echo "Attempt $attempt of $max_attempts"
+if go mod download; then
+echo "Download successful on attempt $attempt"
+return 0
+else
+echo "Download failed on attempt $attempt"
+if [ $attempt -lt $max_attempts ]; then
+echo "Cleaning cache and retrying..."
+go clean -modcache
+go clean -cache
+sleep 2
+fi
+attempt=$((attempt + 1))
+fi
+done
+echo "All download attempts failed"
+return 1
+}
+# Try download with retry logic
+download_with_retry
+echo "Verifying module dependencies..."
+go mod verify
+echo "Dependencies verified successfully"
 - name: Build
-run: task build
+run: go build -v ./...
 - name: Run tests with enhanced reporting
 id: test
 run: |
-cat >> $GITHUB_STEP_SUMMARY << EOF
-## 🔧 Test Environment
-- **Go Version:** ${{ matrix.go }}
-- **OS:** ubuntu-latest
-- **Timestamp:** $(date -u)
-EOF
+echo "## 🔧 Test Environment" >> $GITHUB_STEP_SUMMARY
+echo "- **Go Version:** ${{ matrix.go }}" >> $GITHUB_STEP_SUMMARY
+echo "- **OS:** ubuntu-latest" >> $GITHUB_STEP_SUMMARY
+echo "- **Timestamp:** $(date -u)" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
 echo "Running tests with coverage..."
-task test:coverage 2>&1 | tee test-output.log
+go test -v -race -coverprofile=coverage.out ./... 2>&1 | tee test-output.log
 # Extract test results for summary
 TEST_STATUS=$?
@@ -87,30 +99,30 @@ jobs:
 SKIPPED_TESTS=$(grep -c "--- SKIP:" test-output.log || echo "0")
 # Generate test summary
-cat >> $GITHUB_STEP_SUMMARY << EOF
-## 🧪 Test Results (Go ${{ matrix.go }})
-| Metric | Value |
-| ----------- | ----------------------------------------------------------- |
-| Total Tests | $TOTAL_TESTS |
-| Passed | $PASSED_TESTS |
-| Failed | $FAILED_TESTS |
-| Skipped | $SKIPPED_TESTS |
-| Status | $([ $TEST_STATUS -eq 0 ] && echo "PASSED" || echo "FAILED") |
-### 📦 Package Test Results
-| Package | Status |
-|---------|--------|
-EOF
+echo "## 🧪 Test Results (Go ${{ matrix.go }})" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
+echo "| Metric | Value |" >> $GITHUB_STEP_SUMMARY
+echo "|--------|-------|" >> $GITHUB_STEP_SUMMARY
+echo "| Total Tests | $TOTAL_TESTS |" >> $GITHUB_STEP_SUMMARY
+echo "| Passed | ✅ $PASSED_TESTS |" >> $GITHUB_STEP_SUMMARY
+echo "| Failed | ❌ $FAILED_TESTS |" >> $GITHUB_STEP_SUMMARY
+echo "| Skipped | ⏭️ $SKIPPED_TESTS |" >> $GITHUB_STEP_SUMMARY
+echo "| Status | $([ $TEST_STATUS -eq 0 ] && echo "✅ PASSED" || echo "❌ FAILED") |" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
+# Add package breakdown
+echo "### 📦 Package Test Results" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
+echo "| Package | Status |" >> $GITHUB_STEP_SUMMARY
+echo "|---------|--------|" >> $GITHUB_STEP_SUMMARY
 # Extract package results
-grep "^ok\|^FAIL" test-output.log | while read -r line; do
+grep "^ok\|^FAIL" test-output.log | while read line; do
 if [[ $line == ok* ]]; then
-pkg=$(echo "$line" | awk '{print $2}')
+pkg=$(echo $line | awk '{print $2}')
 echo "| $pkg | ✅ PASS |" >> $GITHUB_STEP_SUMMARY
 elif [[ $line == FAIL* ]]; then
-pkg=$(echo "$line" | awk '{print $2}')
+pkg=$(echo $line | awk '{print $2}')
 echo "| $pkg | ❌ FAIL |" >> $GITHUB_STEP_SUMMARY
 fi
 done
@@ -119,24 +131,19 @@ jobs:
 # Add detailed results if tests failed
 if [ $TEST_STATUS -ne 0 ]; then
-cat >> $GITHUB_STEP_SUMMARY << 'EOF'
-### ❌ Failed Tests Details
-```
-EOF
+echo "### ❌ Failed Tests Details" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
+echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
 grep -A 10 "--- FAIL:" test-output.log | head -100 >> $GITHUB_STEP_SUMMARY
-cat >> $GITHUB_STEP_SUMMARY << 'EOF'
-```
-EOF
+echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
 fi
 # Set outputs for other steps
-cat >> $GITHUB_OUTPUT << EOF
-test-status=$TEST_STATUS
-total-tests=$TOTAL_TESTS
-passed-tests=$PASSED_TESTS
-failed-tests=$FAILED_TESTS
-EOF
+echo "test-status=$TEST_STATUS" >> $GITHUB_OUTPUT
+echo "total-tests=$TOTAL_TESTS" >> $GITHUB_OUTPUT
+echo "passed-tests=$PASSED_TESTS" >> $GITHUB_OUTPUT
+echo "failed-tests=$FAILED_TESTS" >> $GITHUB_OUTPUT
 # Exit with the original test status
 exit $TEST_STATUS
@@ -144,34 +151,37 @@ jobs:
 - name: Generate coverage report
 if: always()
 run: |
-if [ -f coverage/coverage.out ]; then
-COVERAGE=$(go tool cover -func=coverage/coverage.out | grep total | awk '{print $3}')
-cat >> $GITHUB_STEP_SUMMARY << EOF
-## 📊 Code Coverage (Go ${{ matrix.go }})
-**Total Coverage: $COVERAGE**
-<details>
-<summary>Click to expand 📋 Coverage by Package details</summary>
-| Package | Coverage |
-| ------- | -------- |
-EOF
+if [ -f coverage.out ]; then
+go tool cover -html=coverage.out -o coverage.html
+COVERAGE=$(go tool cover -func=coverage.out | grep total | awk '{print $3}')
+echo "## 📊 Code Coverage (Go ${{ matrix.go }})" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
+echo "**Total Coverage: $COVERAGE**" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
+# Add coverage by package
+echo "<details>" >> $GITHUB_STEP_SUMMARY
+echo "<summary>Click to expand 📋 Coverage by Package details</summary>" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
+echo "| Package | Coverage |" >> $GITHUB_STEP_SUMMARY
+echo "|---------|----------|" >> $GITHUB_STEP_SUMMARY
 # Create temporary file for package coverage aggregation
 temp_coverage=$(mktemp)
 # Extract package-level coverage data
-go tool cover -func=coverage/coverage.out | grep -v total | while read -r line; do
+go tool cover -func=coverage.out | grep -v total | while read line; do
 if [[ $line == *".go:"* ]]; then
 # Extract package path from file path (everything before the filename)
 filepath=$(echo "$line" | awk '{print $1}')
-pkg_path=$(dirname "$filepath" | sed 's|github.com/kjanat/articulate-parser/||; s|^\./||')
+pkg_path=$(dirname "$filepath" | sed 's|github.com/kjanat/articulate-parser/||' | sed 's|^\./||')
 coverage=$(echo "$line" | awk '{print $3}' | sed 's/%//')
 # Use root package if no subdirectory
-[[ "$pkg_path" == "." || "$pkg_path" == "" ]] && pkg_path="root"
+if [[ "$pkg_path" == "." || "$pkg_path" == "" ]]; then
+pkg_path="root"
+fi
 echo "$pkg_path $coverage" >> "$temp_coverage"
 fi
@@ -179,7 +189,7 @@ jobs:
 # Aggregate coverage by package (average)
 awk '{
-packages[$1] += $2
+packages[$1] += $2;
 counts[$1]++
 }
 END {
@@ -187,97 +197,85 @@ jobs:
 avg = packages[pkg] / counts[pkg]
 printf "| %s | %.1f%% |\n", pkg, avg
 }
-}' "$temp_coverage" | sort >> $GITHUB_STEP_SUMMARY
-rm -f "$temp_coverage"
-cat >> $GITHUB_STEP_SUMMARY << 'EOF'
-</details>
-EOF
+}' $temp_coverage | sort >> $GITHUB_STEP_SUMMARY
+rm -f $temp_coverage
+echo "</details>" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
 else
-cat >> $GITHUB_STEP_SUMMARY << 'EOF'
-## ⚠️ Coverage Report
-No coverage file generated
-EOF
+echo "## ⚠️ Coverage Report" >> $GITHUB_STEP_SUMMARY
+echo "No coverage file generated" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
 fi
 - name: Upload test artifacts
 if: failure()
-uses: actions/upload-artifact@v5
+uses: actions/upload-artifact@v4
 with:
 name: test-results-go-${{ matrix.go }}
 path: |
 test-output.log
-coverage/
+coverage.out
+coverage.html
 retention-days: 7
-- name: Run linters
+- name: Run go vet
 run: |
-# Initialize summary
-cat >> $GITHUB_STEP_SUMMARY << EOF
-## 🔍 Static Analysis (Go ${{ matrix.go }})
-EOF
-# Run go vet
-VET_OUTPUT=$(task lint:vet 2>&1 || echo "")
+echo "## 🔍 Static Analysis (Go ${{ matrix.go }})" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
+VET_OUTPUT=$(go vet ./... 2>&1 || echo "")
 VET_STATUS=$?
 if [ $VET_STATUS -eq 0 ]; then
 echo "✅ **go vet:** No issues found" >> $GITHUB_STEP_SUMMARY
 else
-cat >> $GITHUB_STEP_SUMMARY << 'EOF'
-❌ **go vet:** Issues found
-```
-EOF
+echo "❌ **go vet:** Issues found" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
+echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
 echo "$VET_OUTPUT" >> $GITHUB_STEP_SUMMARY
-echo '```' >> $GITHUB_STEP_SUMMARY
+echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
 fi
 echo "" >> $GITHUB_STEP_SUMMARY
-# Run go fmt check
-FMT_OUTPUT=$(task lint:fmt 2>&1 || echo "")
-FMT_STATUS=$?
-if [ $FMT_STATUS -eq 0 ]; then
+exit $VET_STATUS
+- name: Run go fmt
+run: |
+FMT_OUTPUT=$(gofmt -s -l . 2>&1 || echo "")
+if [ -z "$FMT_OUTPUT" ]; then
 echo "✅ **go fmt:** All files properly formatted" >> $GITHUB_STEP_SUMMARY
 else
-cat >> $GITHUB_STEP_SUMMARY << 'EOF'
-❌ **go fmt:** Files need formatting
-```
-EOF
+echo "❌ **go fmt:** Files need formatting" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
+echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
 echo "$FMT_OUTPUT" >> $GITHUB_STEP_SUMMARY
-echo '```' >> $GITHUB_STEP_SUMMARY
+echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
+exit 1
 fi
-# Exit with error if any linter failed
-[ $VET_STATUS -eq 0 ] && [ $FMT_STATUS -eq 0 ] || exit 1
 - name: Job Summary
 if: always()
 run: |
-cat >> $GITHUB_STEP_SUMMARY << 'EOF'
-## 📋 Job Summary (Go ${{ matrix.go }})
-| Step | Status |
-| --------------- | --------------------------------------------------------------- |
-| Dependencies | Success |
-| Build | Success |
-| Tests | ${{ steps.test.outcome == 'success' && 'Success' || 'Failed' }} |
-| Coverage | ${{ job.status == 'success' && 'Generated' || 'Partial' }} |
-| Static Analysis | ${{ job.status == 'success' && 'Clean' || 'Issues' }} |
-| Code Formatting | ${{ job.status == 'success' && 'Clean' || 'Issues' }} |
-EOF
+echo "## 📋 Job Summary (Go ${{ matrix.go }})" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
+echo "| Step | Status |" >> $GITHUB_STEP_SUMMARY
+echo "|------|--------|" >> $GITHUB_STEP_SUMMARY
+echo "| Dependencies | ✅ Success |" >> $GITHUB_STEP_SUMMARY
+echo "| Build | ✅ Success |" >> $GITHUB_STEP_SUMMARY
+echo "| Tests | ${{ steps.test.outcome == 'success' && '✅ Success' || '❌ Failed' }} |" >> $GITHUB_STEP_SUMMARY
+echo "| Coverage | ${{ job.status == 'success' && '✅ Generated' || '⚠️ Partial' }} |" >> $GITHUB_STEP_SUMMARY
+echo "| Static Analysis | ${{ job.status == 'success' && '✅ Clean' || '❌ Issues' }} |" >> $GITHUB_STEP_SUMMARY
+echo "| Code Formatting | ${{ job.status == 'success' && 'Clean' || 'Issues' }} |" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
 - name: Upload coverage reports to Codecov
 uses: codecov/codecov-action@v5
 with:
-files: ./coverage/coverage.out
 flags: Go ${{ matrix.go }}
 slug: kjanat/articulate-parser
 token: ${{ secrets.CODECOV_TOKEN }}
@@ -297,41 +295,42 @@ jobs:
 contents: read
 steps:
 - name: Checkout repository
-uses: actions/checkout@v5
+uses: actions/checkout@v4
-- name: Set up Go
-uses: actions/setup-go@v6
-with:
-go-version-file: go.mod
-check-latest: true
-- name: Install Task
-uses: go-task/setup-task@v1
 - name: Set up Docker Buildx
 uses: docker/setup-buildx-action@v3
-- name: Build Docker image using Task
-run: task docker:build
-- name: Test Docker image using Task
+- name: Capture build date
+run: echo "BUILD_TIME=$(git log -1 --format=%cd --date=iso-strict)" >> $GITHUB_ENV
+- name: Build Docker image (test)
+uses: docker/build-push-action@v6
+with:
+context: .
+push: false
+load: true
+tags: test:latest
+build-args: |
+VERSION=test
+BUILD_TIME=${{ env.BUILD_TIME }}
+GIT_COMMIT=${{ github.sha }}
+cache-from: type=gha
+cache-to: type=gha,mode=max
+- name: Test Docker image
 run: |
-cat >> $GITHUB_STEP_SUMMARY << 'EOF'
-## 🧪 Docker Image Tests
-EOF
-# Run Task docker test
-task docker:test
-# Test that the image runs and shows help
+echo "## 🧪 Docker Image Tests" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
 echo "**Testing help command:**" >> $GITHUB_STEP_SUMMARY
-echo '```terminaloutput' >> $GITHUB_STEP_SUMMARY
-docker run --rm articulate-parser:latest --help >> $GITHUB_STEP_SUMMARY
-echo '```' >> $GITHUB_STEP_SUMMARY
+echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
+docker run --rm test:latest --help >> $GITHUB_STEP_SUMMARY
+echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
 echo "" >> $GITHUB_STEP_SUMMARY
 # Test image size
-IMAGE_SIZE=$(docker image inspect articulate-parser:latest --format='{{.Size}}' | numfmt --to=iec-i --suffix=B)
+IMAGE_SIZE=$(docker image inspect test:latest --format='{{.Size}}' | numfmt --to=iec-i --suffix=B)
 echo "**Image size:** $IMAGE_SIZE" >> $GITHUB_STEP_SUMMARY
 echo "" >> $GITHUB_STEP_SUMMARY
@@ -343,7 +342,7 @@ jobs:
 if: github.event_name == 'pull_request'
 steps:
 - name: 'Checkout Repository'
-uses: actions/checkout@v5
+uses: actions/checkout@v4
 - name: 'Dependency Review'
 uses: actions/dependency-review-action@v4
@@ -351,20 +350,19 @@ jobs:
 fail-on-severity: moderate
 comment-summary-in-pr: always
 docker:
 name: Docker Build & Push
 runs-on: ubuntu-latest
 permissions:
 contents: read
 packages: write
-needs: [test, docker-test, dependency-review]
-if: |
-github.event_name == 'push' && (github.ref == 'refs/heads/master' ||
-github.ref == 'refs/heads/develop' ||
-startsWith(github.ref, 'refs/heads/feature/docker'))
+needs: ['test']
+if: github.event_name == 'push' && (github.ref == 'refs/heads/master' || github.ref == 'refs/heads/develop' || startsWith(github.ref, 'refs/heads/feature/docker'))
 steps:
 - name: Checkout repository
-uses: actions/checkout@v5
+uses: actions/checkout@v4
 - name: Login to Docker Hub
 uses: docker/login-action@v3
@@ -436,39 +434,35 @@ jobs:
 - name: Generate Docker summary
 run: |
-cat >> $GITHUB_STEP_SUMMARY << 'EOF'
-## 🐳 Docker Build Summary
-**Image:** `ghcr.io/${{ github.repository }}`
-**Tags built:**
-```text
-${{ steps.meta.outputs.tags }}
-```
-**Features:**
-- **Platforms:** linux/amd64, linux/arm64, linux/arm/v7, linux/386, linux/ppc64le, linux/s390x
-- **Architecture optimization:** Native compilation for each platform
-- **Multi-arch image description:** Enabled
-- **SBOM (Software Bill of Materials):** Generated
-- **Provenance attestation:** Generated
-- **Security scanning:** Ready for vulnerability analysis
-**Usage:**
-```bash
-# Pull the image
-docker pull ghcr.io/${{ github.repository }}:latest
-# Run with help
-docker run --rm ghcr.io/${{ github.repository }}:latest --help
-# Process a local file (mount current directory)
-docker run --rm -v $(pwd):/workspace ghcr.io/${{ github.repository }}:latest /workspace/input.json markdown /workspace/output.md
-```
-EOF
+echo "## 🐳 Docker Build Summary" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
+echo "**Image:** \`ghcr.io/${{ github.repository }}\`" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
+echo "**Tags built:**" >> $GITHUB_STEP_SUMMARY
+echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
+echo "${{ steps.meta.outputs.tags }}" >> $GITHUB_STEP_SUMMARY
+echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
+echo "**Features:**" >> $GITHUB_STEP_SUMMARY
+echo "- **Platforms:** linux/amd64, linux/arm64, linux/arm/v7, linux/386, linux/ppc64le, linux/s390x" >> $GITHUB_STEP_SUMMARY
+echo "- **Architecture optimization:** ✅ Native compilation for each platform" >> $GITHUB_STEP_SUMMARY
+echo "- **Multi-arch image description:** ✅ Enabled" >> $GITHUB_STEP_SUMMARY
+echo "- **SBOM (Software Bill of Materials):** ✅ Generated" >> $GITHUB_STEP_SUMMARY
+echo "- **Provenance attestation:** ✅ Generated" >> $GITHUB_STEP_SUMMARY
+echo "- **Security scanning:** Ready for vulnerability analysis" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
+echo "**Usage:**" >> $GITHUB_STEP_SUMMARY
+echo "\`\`\`bash" >> $GITHUB_STEP_SUMMARY
+echo "# Pull the image" >> $GITHUB_STEP_SUMMARY
+echo "docker pull ghcr.io/${{ github.repository }}:latest" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
+echo "# Run with help" >> $GITHUB_STEP_SUMMARY
+echo "docker run --rm ghcr.io/${{ github.repository }}:latest --help" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
+echo "# Process a local file (mount current directory)" >> $GITHUB_STEP_SUMMARY
+echo "docker run --rm -v \$(pwd):/workspace ghcr.io/${{ github.repository }}:latest /workspace/input.json markdown /workspace/output.md" >> $GITHUB_STEP_SUMMARY
+echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
+echo "" >> $GITHUB_STEP_SUMMARY
 # Security and quality analysis workflows
 codeql-analysis:

View File

@@ -61,7 +61,7 @@ jobs:
 # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
 steps:
 - name: Checkout repository
-uses: actions/checkout@v5
+uses: actions/checkout@v4
 # Add any setup steps before running the `github/codeql-action/init` action.
 # This includes steps like installing compilers or runtimes (`actions/setup-node`
@@ -71,7 +71,7 @@ jobs:
 # Initializes the CodeQL tools for scanning.
 - name: Initialize CodeQL
-uses: github/codeql-action/init@v4
+uses: github/codeql-action/init@v3
 with:
 languages: ${{ matrix.language }}
 build-mode: ${{ matrix.build-mode }}
@@ -99,6 +99,6 @@ jobs:
 exit 1
 - name: Perform CodeQL Analysis
-uses: github/codeql-action/analyze@v4
+uses: github/codeql-action/analyze@v3
 with:
 category: "/language:${{matrix.language}}"

View File

@@ -17,7 +17,7 @@ jobs:
 runs-on: ubuntu-latest
 steps:
 - name: 'Checkout Repository'
-uses: actions/checkout@v5
+uses: actions/checkout@v4
 - name: 'Dependency Review'
 uses: actions/dependency-review-action@v4

View File

@@ -3,7 +3,7 @@ name: Release
 on:
 push:
 tags:
-- "v*.*.*"
+- 'v*.*.*'
 workflow_call:
 env:
@@ -20,14 +20,14 @@ jobs:
 runs-on: ubuntu-latest
 steps:
 - name: Checkout code
-uses: actions/checkout@v5
+uses: actions/checkout@v4
 with:
 fetch-depth: 0
 - name: Set up Go
-uses: actions/setup-go@v6
+uses: actions/setup-go@v5
 with:
-go-version-file: "go.mod"
+go-version-file: 'go.mod'
 check-latest: true
 - name: Run tests
@@ -88,7 +88,7 @@ jobs:
 packages: write
 steps:
 - name: Checkout repository
-uses: actions/checkout@v5
+uses: actions/checkout@v4
 - name: Login to Docker Hub
 uses: docker/login-action@v3

.gitignore (8 changed lines)
View File

@@ -69,11 +69,3 @@ main_coverage
 # Editors
 .vscode/
 .idea/
-.task/
-**/*.local.*
-.claude/
-NUL

View File

@@ -1,388 +0,0 @@
# golangci-lint configuration for articulate-parser
# https://golangci-lint.run/usage/configuration/
version: "2"
# Options for analysis running
run:
# Timeout for total work
timeout: 5m
# Skip directories (not allowed in config v2, will use issues exclude instead)
# Go version
go: "1.24"
# Include test files
tests: true
# Use Go module mode
modules-download-mode: readonly
# Output configuration
output:
# Format of output
formats:
text:
print-issued-lines: true
print-linter-name: true
# Sort results
sort-order:
- linter
- severity
- file
# Show statistics
show-stats: true
# Issues configuration
issues:
# Maximum issues count per one linter
max-issues-per-linter: 0
# Maximum count of issues with the same text
max-same-issues: 3
# Show only new issues
new: false
# Fix found issues (if linter supports)
fix: false
# Formatters configuration
formatters:
enable:
- gofmt
- goimports
- gofumpt
settings:
# gofmt settings
gofmt:
simplify: true
# goimports settings
goimports:
local-prefixes:
- github.com/kjanat/articulate-parser
# gofumpt settings
gofumpt:
module-path: github.com/kjanat/articulate-parser
extra-rules: true
# Linters configuration
linters:
# Default set of linters
default: none
# Enable specific linters
enable:
# Default/standard linters
- errcheck # Check for unchecked errors
- govet # Go vet
- ineffassign # Detect ineffectual assignments
- staticcheck # Staticcheck
- unused # Find unused code
# Code quality
- revive # Fast, configurable linter
- gocritic # Opinionated Go source code linter
- godot # Check comment periods
- godox # Detect TODO/FIXME comments
- gocognit # Cognitive complexity
- gocyclo # Cyclomatic complexity
- funlen # Function length
- maintidx # Maintainability index
# Security
- gosec # Security problems
# Performance
- prealloc # Find slice preallocation opportunities
- bodyclose # Check HTTP response body closed
# Style and formatting
- goconst # Find repeated strings
- misspell # Find misspellings
- whitespace # Find unnecessary blank lines
- unconvert # Remove unnecessary type conversions
- dupword # Check for duplicate words
# Error handling
- errorlint # Error handling improvements
- wrapcheck # Check error wrapping
# Testing
- testifylint # Testify usage
- tparallel # Detect improper t.Parallel() usage
- thelper # Detect test helpers without t.Helper()
# Best practices
- exhaustive # Check exhaustiveness of enum switches
- nolintlint # Check nolint directives
- nakedret # Find naked returns
- nilnil # Check for redundant nil checks
- noctx # Check sending HTTP requests without context
- contextcheck # Check context propagation
- asciicheck # Check for non-ASCII identifiers
- bidichk # Check for dangerous unicode sequences
- durationcheck # Check for multiplied durations
- makezero # Find slice declarations with non-zero length
- nilerr # Find code returning nil with non-nil error
- predeclared # Find code shadowing predeclared identifiers
- promlinter # Check Prometheus metrics naming
- reassign # Check reassignment of package variables
- usestdlibvars # Use variables from stdlib
- wastedassign # Find wasted assignments
# Documentation
- godoclint # Check godoc comments
# New
- modernize # Suggest simplifications using new Go features
# Exclusion rules for linters
exclusions:
rules:
# Exclude some linters from test files
- path: _test\.go
linters:
- gosec
- dupl
- errcheck
- goconst
- funlen
- goerr113
- gocognit
# Exclude benchmarks from some linters
- path: _bench_test\.go
linters:
- gosec
- dupl
- errcheck
- goconst
- funlen
- goerr113
- wrapcheck
# Exclude example tests
- path: _example_test\.go
linters:
- gosec
- errcheck
- funlen
- goerr113
- wrapcheck
- revive
# Exclude linters for main.go
- path: ^main\.go$
linters:
- forbidigo
# Exclude certain linters for generated files
- path: internal/version/version.go
linters:
- gochecknoglobals
- gochecknoinits
# Exclude var-naming for interfaces package (standard Go pattern for interface definitions)
- path: internal/interfaces/
text: "var-naming: avoid meaningless package names"
linters:
- revive
# Allow fmt.Print* in main package
- path: ^main\.go$
text: "use of fmt.Print"
linters:
- forbidigo
# Exclude common false positives
- text: "Error return value of .((os\\.)?std(out|err)\\..*|.*Close|.*Flush|os\\.Remove(All)?|.*print(f|ln)?|os\\.(Un)?Setenv). is not checked"
linters:
- errcheck
# Exclude error wrapping suggestions for well-known errors
- text: "non-wrapping format verb for fmt.Errorf"
linters:
- errorlint
# Linters settings
settings:
# errcheck settings
errcheck:
check-type-assertions: true
check-blank: false
# govet settings
govet:
enable-all: true
disable:
- fieldalignment # Too many false positives
- shadow # Can be noisy
# goconst settings
goconst:
min-len: 3
min-occurrences: 3
# godot settings
godot:
scope: toplevel
exclude:
- "^fixme:"
- "^todo:"
capital: true
period: true
# godox settings
godox:
keywords:
- TODO
- FIXME
- HACK
- BUG
- XXX
# misspell settings
misspell:
locale: US
# funlen settings
funlen:
lines: 100
statements: 50
# gocognit settings
gocognit:
min-complexity: 20
# gocyclo settings
gocyclo:
min-complexity: 15
# gocritic settings
gocritic:
enabled-tags:
- diagnostic
- style
- performance
- experimental
disabled-checks:
- ifElseChain
- singleCaseSwitch
- commentedOutCode
settings:
hugeParam:
sizeThreshold: 512
rangeValCopy:
sizeThreshold: 512
# gosec settings
gosec:
severity: medium
confidence: medium
excludes:
- G104 # Handled by errcheck
- G304 # File path provided as taint input
# revive settings
revive:
severity: warning
rules:
- name: blank-imports
- name: context-as-argument
- name: context-keys-type
- name: dot-imports
- name: empty-block
- name: error-naming
- name: error-return
- name: error-strings
- name: errorf
- name: exported
- name: if-return
- name: increment-decrement
- name: indent-error-flow
- name: package-comments
- name: range
- name: receiver-naming
- name: time-naming
- name: unexported-return
- name: var-declaration
- name: var-naming
# errorlint settings
errorlint:
errorf: true
errorf-multi: true
asserts: true
comparison: true
# wrapcheck settings
wrapcheck:
ignore-sigs:
- .Errorf(
- errors.New(
- errors.Unwrap(
- errors.Join(
- .WithMessage(
- .WithMessagef(
- .WithStack(
ignore-package-globs:
- github.com/kjanat/articulate-parser/*
# exhaustive settings
exhaustive:
check:
- switch
- map
default-signifies-exhaustive: true
# nolintlint settings
nolintlint:
allow-unused: false
require-explanation: true
require-specific: true
# stylecheck settings
staticcheck:
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022"]
# maintidx settings
maintidx:
under: 20
# testifylint settings
testifylint:
enable-all: true
disable:
- float-compare
# thelper settings
thelper:
test:
first: true
name: true
begin: true
benchmark:
first: true
name: true
begin: true
# Severity rules
severity:
default: warning
rules:
- linters:
- gosec
severity: error
- linters:
- errcheck
- staticcheck
severity: error
- linters:
- godox
severity: info

View File

@@ -1,56 +0,0 @@
# Agent Guidelines for articulate-parser
## Build/Test Commands
- **Build**: `task build` or `go build -o bin/articulate-parser main.go`
- **Run tests**: `task test` or `go test -race -timeout 5m ./...`
- **Run single test**: `go test -v -race -run ^TestName$ ./path/to/package`
- **Test with coverage**:
- `task test:coverage` or
- `go test -race -coverprofile=coverage/coverage.out -covermode=atomic ./...`
- **Lint**: `task lint` (runs vet, fmt check, staticcheck, golangci-lint)
- **Format**: `task fmt` or `gofmt -s -w .`
- **CI checks**: `task ci` (deps, lint, test with coverage, build)
## Code Style Guidelines
### Imports
- Use `goimports` with local prefix: `github.com/kjanat/articulate-parser`
- Order: stdlib, external, internal packages
- Group related imports together
### Formatting
- Use `gofmt -s` (simplify) and `gofumpt` with extra rules
- Function length: max 100 lines, 50 statements
- Cyclomatic complexity: max 15
- Cognitive complexity: max 20
### Types & Naming
- Use interface-based design (see `internal/interfaces/`)
- Export types/functions with clear godoc comments ending with period
- Use descriptive names: `ArticulateParser`, `MarkdownExporter`
- Receiver names: short (1-2 chars), consistent per type
### Error Handling
- Always wrap errors with context: `fmt.Errorf("operation failed: %w", err)`
- Use `%w` verb for error wrapping to preserve error chain
- Check all error returns (enforced by `errcheck`)
- Document error handling rationale in defer blocks when ignoring close errors
### Comments
- All exported types/functions require godoc comments
- End sentences with periods (`godot` linter enforced)
- Mark known issues with TODO/FIXME/HACK/BUG/XXX
### Security
- Use `#nosec` with justification for deliberate security exceptions (G304 for CLI file paths, G306 for export file permissions)
- Run `gosec` and `govulncheck` for security audits
### Testing
- Enable race detection: `-race` flag
- Use table-driven tests where applicable
- Mark test helpers with `t.Helper()`
- Benchmarks in `*_bench_test.go`, examples in `*_example_test.go`
### Dependencies
- Minimal external dependencies (currently: go-docx, golang.org/x/net, golang.org/x/text)
- Run `task deps:tidy` after adding/removing dependencies
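
For illustration only, a minimal Go sketch of the error-wrapping, test-helper, and table-driven-test conventions the removed guidelines describe; the function and file names here are hypothetical and not taken from the repository:

package example

import (
    "fmt"
    "os"
    "testing"
)

// loadSpec follows the removed guidelines: every error is wrapped with %w
// so the original error chain stays intact for errors.Is / errors.As.
func loadSpec(path string) ([]byte, error) {
    data, err := os.ReadFile(path)
    if err != nil {
        return nil, fmt.Errorf("reading spec %q failed: %w", path, err)
    }
    return data, nil
}

// mustTempSpec is a test helper; t.Helper() keeps failure line numbers
// pointing at the calling test, as the guidelines require.
func mustTempSpec(t *testing.T, contents string) string {
    t.Helper()
    f, err := os.CreateTemp(t.TempDir(), "spec-*.json")
    if err != nil {
        t.Fatalf("creating temp spec: %v", err)
    }
    if _, err := f.WriteString(contents); err != nil {
        t.Fatalf("writing temp spec: %v", err)
    }
    if err := f.Close(); err != nil {
        t.Fatalf("closing temp spec: %v", err)
    }
    return f.Name()
}

// TestLoadSpec is a table-driven test in the style the guidelines suggest.
func TestLoadSpec(t *testing.T) {
    tests := []struct {
        name    string
        path    func(t *testing.T) string
        wantErr bool
    }{
        {"existing file", func(t *testing.T) string { return mustTempSpec(t, "{}") }, false},
        {"missing file", func(t *testing.T) string { return "does-not-exist.json" }, true},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            _, err := loadSpec(tt.path(t))
            if (err != nil) != tt.wantErr {
                t.Errorf("loadSpec() error = %v, wantErr %v", err, tt.wantErr)
            }
        })
    }
}

Wrapping with %w keeps the underlying error available to callers, which is the behaviour the errcheck and errorlint settings in the removed .golangci.yml assume.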

View File

@@ -1,5 +1,5 @@
 # Build stage
-FROM golang:1.25-alpine AS builder
+FROM golang:1.24-alpine AS builder
 # Install git and ca-certificates (needed for fetching dependencies and HTTPS)
 RUN apk add --no-cache git ca-certificates tzdata file

View File

@@ -2,7 +2,7 @@
 # Uses Alpine instead of scratch for debugging
 # Build stage - same as production
-FROM golang:1.25-alpine AS builder
+FROM golang:1.24-alpine AS builder
 # Install git and ca-certificates (needed for fetching dependencies and HTTPS)
 RUN apk add --no-cache git ca-certificates tzdata file
@@ -49,7 +49,7 @@ RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build \
 RUN file /app/articulate-parser || echo "file command not available"
 # Development stage - uses Alpine for shell access
-FROM alpine:3
+FROM alpine:3.21.3
 # Install minimal dependencies
 RUN apk add --no-cache ca-certificates tzdata

View File

@@ -1,602 +0,0 @@
# yaml-language-server: $schema=https://taskfile.dev/schema.json
# Articulate Parser - Task Automation
# https://taskfile.dev
version: '3'
# Global output settings
output: prefixed
# Shell settings (only applied on Unix-like systems)
# Note: These are ignored on Windows where PowerShell/cmd is used
set: [errexit, pipefail]
shopt: [globstar]
# Watch mode interval
interval: 500ms
# Global variables
vars:
APP_NAME: articulate-parser
MAIN_FILE: main.go
OUTPUT_DIR: bin
COVERAGE_DIR: coverage
TEST_TIMEOUT: 5m
# Version info
VERSION:
sh: git describe --tags --always --dirty 2>/dev/null || echo "dev"
GIT_COMMIT:
sh: git rev-parse --short HEAD 2>/dev/null || echo "unknown"
BUILD_TIME: '{{now | date "2006-01-02T15:04:05Z07:00"}}'
# Go settings
CGO_ENABLED: 0
GO_FLAGS: -v
LDFLAGS: >-
-s -w
-X github.com/kjanat/articulate-parser/internal/version.Version={{.VERSION}}
-X github.com/kjanat/articulate-parser/internal/version.BuildTime={{.BUILD_TIME}}
-X github.com/kjanat/articulate-parser/internal/version.GitCommit={{.GIT_COMMIT}}
# Platform detection (using Task built-in variables)
GOOS:
sh: go env GOOS
GOARCH:
sh: go env GOARCH
EXE_EXT: '{{if eq OS "windows"}}.exe{{end}}'
# Environment variables
env:
CGO_ENABLED: '{{.CGO_ENABLED}}'
GO111MODULE: on
# Load .env files if present
dotenv: ['.env', '.env.local']
# Task definitions
tasks:
# Default task - show help
default:
desc: Show available tasks
cmds:
- task --list
silent: true
# Development tasks
dev:
desc: Run the application in development mode (with hot reload)
aliases: [run, start]
interactive: true
watch: true
sources:
- '**/*.go'
- go.mod
- go.sum
cmds:
- task: build
- '{{.OUTPUT_DIR}}/{{.APP_NAME}}{{.EXE_EXT}} --help'
# Build tasks
build:
desc: Build the application binary
aliases: [b]
deps: [clean-bin]
sources:
- '**/*.go'
- go.mod
- go.sum
generates:
- '{{.OUTPUT_DIR}}/{{.APP_NAME}}{{.EXE_EXT}}'
cmds:
- task: mkdir
vars: { DIR: '{{.OUTPUT_DIR}}' }
- go build {{.GO_FLAGS}} -ldflags="{{.LDFLAGS}}" -o {{.OUTPUT_DIR}}/{{.APP_NAME}}{{.EXE_EXT}} {{.MAIN_FILE}}
method: checksum
build:all:
desc: Build binaries for all major platforms
aliases: [build-all, cross-compile]
deps: [clean-bin]
cmds:
- task: mkdir
vars: { DIR: '{{.OUTPUT_DIR}}' }
- for:
matrix:
GOOS: [linux, darwin, windows]
GOARCH: [amd64, arm64]
task: build:platform
vars:
TARGET_GOOS: '{{.ITEM.GOOS}}'
TARGET_GOARCH: '{{.ITEM.GOARCH}}'
- echo "Built binaries for all platforms in {{.OUTPUT_DIR}}/"
build:platform:
internal: true
vars:
TARGET_EXT: '{{if eq .TARGET_GOOS "windows"}}.exe{{end}}'
OUTPUT_FILE: '{{.OUTPUT_DIR}}/{{.APP_NAME}}-{{.TARGET_GOOS}}-{{.TARGET_GOARCH}}{{.TARGET_EXT}}'
env:
GOOS: '{{.TARGET_GOOS}}'
GOARCH: '{{.TARGET_GOARCH}}'
cmds:
- echo "Building {{.OUTPUT_FILE}}..."
- go build {{.GO_FLAGS}} -ldflags="{{.LDFLAGS}}" -o "{{.OUTPUT_FILE}}" {{.MAIN_FILE}}
# Install task
install:
desc: Install the binary to $GOPATH/bin
deps: [test]
cmds:
- go install -ldflags="{{.LDFLAGS}}" {{.MAIN_FILE}}
- echo "Installed {{.APP_NAME}} to $(go env GOPATH)/bin"
# Testing tasks
test:
desc: Run all tests
aliases: [t]
cmds:
- go test {{.GO_FLAGS}} -race -timeout {{.TEST_TIMEOUT}} ./...
test:coverage:
desc: Run tests with coverage report
aliases: [cover, cov]
deps: [clean-coverage]
cmds:
- task: mkdir
vars: { DIR: '{{.COVERAGE_DIR}}' }
- go test {{.GO_FLAGS}} -race -coverprofile={{.COVERAGE_DIR}}/coverage.out -covermode=atomic -timeout {{.TEST_TIMEOUT}} ./...
- go tool cover -html={{.COVERAGE_DIR}}/coverage.out -o {{.COVERAGE_DIR}}/coverage.html
- go tool cover -func={{.COVERAGE_DIR}}/coverage.out
- echo "Coverage report generated at {{.COVERAGE_DIR}}/coverage.html"
test:verbose:
desc: Run tests with verbose output
aliases: [tv]
cmds:
- go test -v -race -timeout {{.TEST_TIMEOUT}} ./...
test:watch:
desc: Run tests in watch mode
aliases: [tw]
watch: true
sources:
- '**/*.go'
cmds:
- task: test
test:bench:
desc: Run benchmark tests
aliases: [bench]
cmds:
- go test -bench=. -benchmem -timeout {{.TEST_TIMEOUT}} ./...
test:integration:
desc: Run integration tests
status:
- '{{if eq OS "windows"}}if not exist "main_test.go" exit 1{{else}}test ! -f "main_test.go"{{end}}'
cmds:
- go test -v -race -tags=integration -timeout {{.TEST_TIMEOUT}} ./...
# Code quality tasks
lint:
desc: Run all linters
silent: true
aliases: [l]
cmds:
- task: lint:vet
- task: lint:fmt
- task: lint:staticcheck
- task: lint:golangci
lint:vet:
desc: Run go vet
silent: true
cmds:
- go vet ./...
lint:fmt:
desc: Check code formatting
silent: true
vars:
UNFORMATTED:
sh: gofmt -s -l .
cmds:
- |
{{if ne .UNFORMATTED ""}}
echo "❌ The following files need formatting:"
echo "{{.UNFORMATTED}}"
exit 1
{{else}}
echo "All files are properly formatted"
{{end}}
lint:staticcheck:
desc: Run staticcheck (install if needed)
silent: true
vars:
HAS_STATICCHECK:
sh: '{{if eq OS "windows"}}where.exe staticcheck 2>NUL{{else}}command -v staticcheck 2>/dev/null{{end}}'
cmds:
- '{{if eq .HAS_STATICCHECK ""}}echo "Installing staticcheck..." && go install honnef.co/go/tools/cmd/staticcheck@latest{{end}}'
- staticcheck ./...
ignore_error: true
lint:golangci:
desc: Run golangci-lint (install if needed)
silent: true
aliases: [golangci, golangci-lint]
vars:
HAS_GOLANGCI:
sh: '{{if eq OS "windows"}}where.exe golangci-lint 2>NUL{{else}}command -v golangci-lint 2>/dev/null{{end}}'
cmds:
- '{{if eq .HAS_GOLANGCI ""}}echo "Installing golangci-lint..." && go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest{{end}}'
- golangci-lint run ./...
- echo "✅ golangci-lint passed"
lint:golangci:fix:
desc: Run golangci-lint with auto-fix
silent: true
aliases: [golangci-fix]
vars:
HAS_GOLANGCI:
sh: '{{if eq OS "windows"}}where.exe golangci-lint 2>NUL{{else}}command -v golangci-lint 2>/dev/null{{end}}'
cmds:
- '{{if eq .HAS_GOLANGCI ""}}echo "Installing golangci-lint..." && go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest{{end}}'
- golangci-lint run --fix ./...
- echo "golangci-lint fixes applied"
fmt:
desc: Format all Go files
silent: true
aliases: [format]
cmds:
- gofmt -s -w .
- echo "Formatted all Go files"
modernize:
desc: Modernize Go code to use modern idioms
silent: true
aliases: [modern]
cmds:
- go run golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@latest -fix -test ./...
- echo "Code modernized"
# Dependency management
deps:
desc: Download and verify dependencies
aliases: [mod]
cmds:
- go mod download
- go mod verify
- echo "Dependencies downloaded and verified"
deps:tidy:
desc: Tidy go.mod and go.sum
aliases: [tidy]
cmds:
- go mod tidy
- echo "Dependencies tidied"
deps:update:
desc: Update all dependencies to latest versions
aliases: [update]
cmds:
- go get -u ./...
- go mod tidy
- echo "Dependencies updated"
deps:graph:
desc: Display dependency graph
cmds:
- go mod graph
# Docker tasks
docker:build:
desc: Build Docker image
aliases: [db]
cmds:
- |
docker build \
--build-arg VERSION={{.VERSION}} \
--build-arg BUILD_TIME={{.BUILD_TIME}} \
--build-arg GIT_COMMIT={{.GIT_COMMIT}} \
-t {{.APP_NAME}}:{{.VERSION}} \
-t {{.APP_NAME}}:latest \
.
- >
echo "Docker image built: {{.APP_NAME}}:{{.VERSION}}"
docker:build:dev:
desc: Build development Docker image
cmds:
- docker build -f Dockerfile.dev -t {{.APP_NAME}}:dev .
- >
echo "Development Docker image built: {{.APP_NAME}}:dev"
docker:run:
desc: Run Docker container
aliases: [dr]
deps: [docker:build]
cmds:
- docker run --rm {{.APP_NAME}}:{{.VERSION}} --help
docker:test:
desc: Test Docker image
deps: [docker:build]
cmds:
- docker run --rm {{.APP_NAME}}:{{.VERSION}} --version
- echo "Docker image tested successfully"
docker:compose:up:
desc: Start services with docker-compose
cmds:
- docker-compose up -d
docker:compose:down:
desc: Stop services with docker-compose
cmds:
- docker-compose down
# Cleanup tasks
clean:
desc: Clean all generated files
aliases: [c]
cmds:
- task: clean-bin
- task: clean-coverage
- task: clean-cache
- echo "All generated files cleaned"
clean-bin:
desc: Remove built binaries
internal: true
cmds:
- task: rmdir
vars: { DIR: '{{.OUTPUT_DIR}}' }
clean-coverage:
desc: Remove coverage files
internal: true
cmds:
- task: rmdir
vars: { DIR: '{{.COVERAGE_DIR}}' }
clean-cache:
desc: Clean Go build and test cache
cmds:
- go clean -cache -testcache -modcache
- echo "Go caches cleaned"
# CI/CD tasks
ci:
desc: Run all CI checks (test, lint, build)
cmds:
- task: deps
- task: lint
- task: test:coverage
- task: build
- echo "All CI checks passed"
ci:local:
desc: Run CI checks locally with detailed output
cmds:
- echo "🔍 Running local CI checks..."
- echo ""
- echo "📦 Checking dependencies..."
- task: deps
- echo ""
- echo "🔧 Running linters..."
- task: lint
- echo ""
- echo "🧪 Running tests with coverage..."
- task: test:coverage
- echo ""
- echo "🏗️ Building application..."
- task: build:all
- echo ""
- echo "All CI checks completed successfully!"
# Release tasks
release:check:
desc: Check if ready for release
cmds:
- task: ci
- git diff --exit-code
- git diff --cached --exit-code
- echo "Ready for release"
release:tag:
desc: Tag a new release (requires VERSION env var)
requires:
vars: [VERSION]
preconditions:
- sh: 'git diff --exit-code'
msg: 'Working directory is not clean'
- sh: 'git diff --cached --exit-code'
msg: 'Staging area is not clean'
cmds:
- git tag -a v{{.VERSION}} -m "Release v{{.VERSION}}"
- echo "Tagged v{{.VERSION}}"
- >
echo "Push with: git push origin v{{.VERSION}}"
# Documentation tasks
docs:serve:
desc: Serve documentation locally
vars:
HAS_GODOC:
sh: '{{if eq OS "windows"}}where.exe godoc 2>NUL{{else}}command -v godoc 2>/dev/null{{end}}'
cmds:
- '{{if eq .HAS_GODOC ""}}echo "Installing godoc..." && go install golang.org/x/tools/cmd/godoc@latest{{end}}'
- echo "📚 Serving documentation at http://localhost:6060"
- godoc -http=:6060
interactive: true
docs:coverage:
desc: Open coverage report in browser
deps: [test:coverage]
cmds:
- '{{if eq OS "darwin"}}open {{.COVERAGE_DIR}}/coverage.html{{else if eq OS "windows"}}start {{.COVERAGE_DIR}}/coverage.html{{else}}xdg-open {{.COVERAGE_DIR}}/coverage.html 2>/dev/null || echo "Please open {{.COVERAGE_DIR}}/coverage.html in your browser"{{end}}'
# Info tasks
info:
desc: Display build information
vars:
GO_VERSION:
sh: go version
cmds:
- task: info:print
silent: true
info:print:
internal: true
silent: true
vars:
GO_VERSION:
sh: go version
cmds:
- echo "Application Info:"
- echo " Name{{":"}} {{.APP_NAME}}"
- echo " Version{{":"}} {{.VERSION}}"
- echo " Git Commit{{":"}} {{.GIT_COMMIT}}"
- echo " Build Time{{":"}} {{.BUILD_TIME}}"
- echo ""
- echo "Go Environment{{":"}}"
- echo " Go Version{{":"}} {{.GO_VERSION}}"
- echo " GOOS{{":"}} {{.GOOS}}"
- echo " GOARCH{{":"}} {{.GOARCH}}"
- echo " CGO{{":"}} {{.CGO_ENABLED}}"
- echo ""
- echo "Paths{{":"}}"
- echo " Output Dir{{":"}} {{.OUTPUT_DIR}}"
- echo " Coverage{{":"}} {{.COVERAGE_DIR}}"
# Security tasks
security:check:
desc: Run security checks with gosec
vars:
HAS_GOSEC:
sh: '{{if eq OS "windows"}}where.exe gosec 2>NUL{{else}}command -v gosec 2>/dev/null{{end}}'
cmds:
- '{{if eq .HAS_GOSEC ""}}echo "Installing gosec..." && go install github.com/securego/gosec/v2/cmd/gosec@latest{{end}}'
- gosec ./...
ignore_error: true
security:audit:
desc: Audit dependencies for vulnerabilities
vars:
HAS_GOVULNCHECK:
sh: '{{if eq OS "windows"}}where.exe govulncheck 2>NUL{{else}}command -v govulncheck 2>/dev/null{{end}}'
cmds:
- '{{if eq .HAS_GOVULNCHECK ""}}echo "Installing govulncheck..." && go install golang.org/x/vuln/cmd/govulncheck@latest{{end}}'
- govulncheck ./...
# Example/Demo tasks
demo:markdown:
desc: Demo - Convert sample to Markdown
status:
- '{{if eq OS "windows"}}if not exist "articulate-sample.json" exit 1{{else}}test ! -f "articulate-sample.json"{{end}}'
deps: [build]
cmds:
- '{{.OUTPUT_DIR}}/{{.APP_NAME}}{{.EXE_EXT}} articulate-sample.json md output-demo.md'
- echo "Demo Markdown created{{:}} output-demo.md"
- defer:
task: rmfile
vars: { FILE: 'output-demo.md' }
demo:html:
desc: Demo - Convert sample to HTML
status:
- '{{if eq OS "windows"}}if not exist "articulate-sample.json" exit 1{{else}}test ! -f "articulate-sample.json"{{end}}'
deps: [build]
cmds:
- '{{.OUTPUT_DIR}}/{{.APP_NAME}}{{.EXE_EXT}} articulate-sample.json html output-demo.html'
- echo "Demo HTML created{{:}} output-demo.html"
- defer:
task: rmfile
vars: { FILE: 'output-demo.html' }
demo:docx:
desc: Demo - Convert sample to DOCX
status:
- '{{if eq OS "windows"}}if not exist "articulate-sample.json" exit 1{{else}}test ! -f "articulate-sample.json"{{end}}'
deps: [build]
cmds:
- '{{.OUTPUT_DIR}}/{{.APP_NAME}}{{.EXE_EXT}} articulate-sample.json docx output-demo.docx'
- echo "Demo DOCX created{{:}} output-demo.docx"
- defer:
task: rmfile
vars: { FILE: 'output-demo.docx' }
# Performance profiling
profile:cpu:
desc: Run CPU profiling
cmds:
- go test -cpuprofile=cpu.prof -bench=. ./...
- go tool pprof -http=:8080 cpu.prof
- defer:
task: rmfile
vars: { FILE: 'cpu.prof' }
profile:mem:
desc: Run memory profiling
cmds:
- go test -memprofile=mem.prof -bench=. ./...
- go tool pprof -http=:8080 mem.prof
- defer:
task: rmfile
vars: { FILE: 'mem.prof' }
# Git hooks
hooks:install:
desc: Install git hooks
cmds:
- task: mkdir
vars: { DIR: '.git/hooks' }
- '{{if eq OS "windows"}}echo "#!/bin/sh" > .git/hooks/pre-commit && echo "task lint:fmt" >> .git/hooks/pre-commit{{else}}cat > .git/hooks/pre-commit << ''EOF''{{printf "\n"}}#!/bin/sh{{printf "\n"}}task lint:fmt{{printf "\n"}}EOF{{printf "\n"}}chmod +x .git/hooks/pre-commit{{end}}'
- echo "Git hooks installed"
# Quick shortcuts
qa:
desc: Quick quality assurance (fmt + lint + test)
aliases: [q, quick]
cmds:
- task: fmt
- task: lint
- task: test
- echo "Quick QA passed"
all:
desc: Build everything (clean + deps + test + build:all + docker:build)
cmds:
- task: clean
- task: deps:tidy
- task: test:coverage
- task: build:all
- task: docker:build
- echo "Full build completed!"
# Cross-platform helper tasks
mkdir:
internal: true
requires:
vars: [DIR]
cmds:
- '{{if eq OS "windows"}}powershell -Command "New-Item -ItemType Directory -Force -Path ''{{.DIR}}'' | Out-Null"{{else}}mkdir -p "{{.DIR}}"{{end}}'
silent: true
rmdir:
internal: true
requires:
vars: [DIR]
cmds:
- '{{if eq OS "windows"}}powershell -Command "if (Test-Path ''{{.DIR}}'') { Remove-Item -Recurse -Force ''{{.DIR}}'' }"{{else}}rm -rf "{{.DIR}}" 2>/dev/null || true{{end}}'
silent: true
rmfile:
internal: true
requires:
vars: [FILE]
cmds:
- '{{if eq OS "windows"}}powershell -Command "if (Test-Path ''{{.FILE}}'') { Remove-Item -Force ''{{.FILE}}'' }"{{else}}rm -f "{{.FILE}}"{{end}}'
silent: true
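
The LDFLAGS variable near the top of the removed Taskfile injects version metadata at link time with -X flags into github.com/kjanat/articulate-parser/internal/version. That file is not shown in this diff, but a minimal sketch of what such a package conventionally looks like is:

// Package version holds build metadata that is overridden at link time via
// -ldflags "-X github.com/kjanat/articulate-parser/internal/version.Version=...",
// matching the LDFLAGS variable of the removed Taskfile above.
package version

// These defaults apply when the binary is built without the -X flags,
// for example with a plain `go build`.
var (
    Version   = "dev"
    BuildTime = "unknown"
    GitCommit = "unknown"
)

-X can only overwrite package-level string variables, which is also why the removed .golangci.yml excludes gochecknoglobals and gochecknoinits for internal/version/version.go.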

go.mod (10 changed lines)
View File

@@ -1,14 +1,10 @@
 module github.com/kjanat/articulate-parser
-go 1.24.0
+go 1.23.0
-require (
-github.com/fumiama/go-docx v0.0.0-20250506085032-0c30fd09304b
-golang.org/x/net v0.46.0
-golang.org/x/text v0.30.0
-)
+require github.com/fumiama/go-docx v0.0.0-20250506085032-0c30fd09304b
 require (
 github.com/fumiama/imgsz v0.0.4 // indirect
-golang.org/x/image v0.32.0 // indirect
+golang.org/x/image v0.27.0 // indirect
 )

go.sum (8 changed lines)
View File

@@ -2,9 +2,5 @@ github.com/fumiama/go-docx v0.0.0-20250506085032-0c30fd09304b h1:/mxSugRc4SgN7Xg
 github.com/fumiama/go-docx v0.0.0-20250506085032-0c30fd09304b/go.mod h1:ssRF0IaB1hCcKIObp3FkZOsjTcAHpgii70JelNb4H8M=
 github.com/fumiama/imgsz v0.0.4 h1:Lsasu2hdSSFS+vnD+nvR1UkiRMK7hcpyYCC0FzgSMFI=
 github.com/fumiama/imgsz v0.0.4/go.mod h1:bISOQVTlw9sRytPwe8ir7tAaEmyz9hSNj9n8mXMBG0E=
-golang.org/x/image v0.32.0 h1:6lZQWq75h7L5IWNk0r+SCpUJ6tUVd3v4ZHnbRKLkUDQ=
-golang.org/x/image v0.32.0/go.mod h1:/R37rrQmKXtO6tYXAjtDLwQgFLHmhW+V6ayXlxzP2Pc=
+golang.org/x/image v0.27.0 h1:C8gA4oWU/tKkdCfYT6T2u4faJu3MeNS5O8UPWlPF61w=
+golang.org/x/image v0.27.0/go.mod h1:xbdrClrAUway1MUTEZDq9mz/UpRwYAkFFNUslZtcB+g=
-golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
-golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
-golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
-golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=

View File

@@ -1,77 +0,0 @@
// Package config provides configuration management for the articulate-parser application.
// It supports loading configuration from environment variables and command-line flags.
package config
import (
"log/slog"
"os"
"strconv"
"time"
)
// Config holds all configuration values for the application.
type Config struct {
// Parser configuration
BaseURL string
RequestTimeout time.Duration
// Logging configuration
LogLevel slog.Level
LogFormat string // "json" or "text"
}
// Default configuration values.
const (
DefaultBaseURL = "https://rise.articulate.com"
DefaultRequestTimeout = 30 * time.Second
DefaultLogLevel = slog.LevelInfo
DefaultLogFormat = "text"
)
// Load creates a new Config with values from environment variables.
// Falls back to defaults if environment variables are not set.
func Load() *Config {
return &Config{
BaseURL: getEnv("ARTICULATE_BASE_URL", DefaultBaseURL),
RequestTimeout: getDurationEnv("ARTICULATE_REQUEST_TIMEOUT", DefaultRequestTimeout),
LogLevel: getLogLevelEnv("LOG_LEVEL", DefaultLogLevel),
LogFormat: getEnv("LOG_FORMAT", DefaultLogFormat),
}
}
// getEnv retrieves an environment variable or returns the default value.
func getEnv(key, defaultValue string) string {
if value := os.Getenv(key); value != "" {
return value
}
return defaultValue
}
// getDurationEnv retrieves a duration from environment variable or returns default.
// The environment variable should be in seconds (e.g., "30" for 30 seconds).
func getDurationEnv(key string, defaultValue time.Duration) time.Duration {
if value := os.Getenv(key); value != "" {
if seconds, err := strconv.Atoi(value); err == nil {
return time.Duration(seconds) * time.Second
}
}
return defaultValue
}
// getLogLevelEnv retrieves a log level from environment variable or returns default.
// Accepts: "debug", "info", "warn", "error" (case-insensitive).
func getLogLevelEnv(key string, defaultValue slog.Level) slog.Level {
value := os.Getenv(key)
switch value {
case "debug", "DEBUG":
return slog.LevelDebug
case "info", "INFO":
return slog.LevelInfo
case "warn", "WARN", "warning", "WARNING":
return slog.LevelWarn
case "error", "ERROR":
return slog.LevelError
default:
return defaultValue
}
}
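
A short usage sketch of the removed config package for illustration; the caller is hypothetical, and only Load and the Config fields shown above are assumed:

package main

import (
    "fmt"
    "net/http"

    "github.com/kjanat/articulate-parser/internal/config"
)

func main() {
    // Load reads ARTICULATE_BASE_URL, ARTICULATE_REQUEST_TIMEOUT,
    // LOG_LEVEL and LOG_FORMAT, falling back to the defaults above.
    cfg := config.Load()

    // The timeout would typically be handed to whatever HTTP client fetches courses.
    client := &http.Client{Timeout: cfg.RequestTimeout}

    fmt.Printf("base URL %s, timeout %s, log format %s\n",
        cfg.BaseURL, cfg.RequestTimeout, cfg.LogFormat)
    _ = client
}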

View File

@@ -1,116 +0,0 @@
package config
import (
"log/slog"
"os"
"testing"
"time"
)
func TestLoad(t *testing.T) {
// Clear environment
os.Clearenv()
cfg := Load()
if cfg.BaseURL != DefaultBaseURL {
t.Errorf("Expected BaseURL '%s', got '%s'", DefaultBaseURL, cfg.BaseURL)
}
if cfg.RequestTimeout != DefaultRequestTimeout {
t.Errorf("Expected timeout %v, got %v", DefaultRequestTimeout, cfg.RequestTimeout)
}
if cfg.LogLevel != DefaultLogLevel {
t.Errorf("Expected log level %v, got %v", DefaultLogLevel, cfg.LogLevel)
}
if cfg.LogFormat != DefaultLogFormat {
t.Errorf("Expected log format '%s', got '%s'", DefaultLogFormat, cfg.LogFormat)
}
}
func TestLoad_WithEnvironmentVariables(t *testing.T) {
// Set environment variables
t.Setenv("ARTICULATE_BASE_URL", "https://test.example.com")
t.Setenv("ARTICULATE_REQUEST_TIMEOUT", "60")
t.Setenv("LOG_LEVEL", "debug")
t.Setenv("LOG_FORMAT", "json")
cfg := Load()
if cfg.BaseURL != "https://test.example.com" {
t.Errorf("Expected BaseURL 'https://test.example.com', got '%s'", cfg.BaseURL)
}
if cfg.RequestTimeout != 60*time.Second {
t.Errorf("Expected timeout 60s, got %v", cfg.RequestTimeout)
}
if cfg.LogLevel != slog.LevelDebug {
t.Errorf("Expected log level Debug, got %v", cfg.LogLevel)
}
if cfg.LogFormat != "json" {
t.Errorf("Expected log format 'json', got '%s'", cfg.LogFormat)
}
}
func TestGetLogLevelEnv(t *testing.T) {
tests := []struct {
name string
value string
expected slog.Level
}{
{"debug lowercase", "debug", slog.LevelDebug},
{"debug uppercase", "DEBUG", slog.LevelDebug},
{"info lowercase", "info", slog.LevelInfo},
{"info uppercase", "INFO", slog.LevelInfo},
{"warn lowercase", "warn", slog.LevelWarn},
{"warn uppercase", "WARN", slog.LevelWarn},
{"warning lowercase", "warning", slog.LevelWarn},
{"error lowercase", "error", slog.LevelError},
{"error uppercase", "ERROR", slog.LevelError},
{"invalid value", "invalid", slog.LevelInfo},
{"empty value", "", slog.LevelInfo},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
os.Clearenv()
if tt.value != "" {
t.Setenv("TEST_LOG_LEVEL", tt.value)
}
result := getLogLevelEnv("TEST_LOG_LEVEL", slog.LevelInfo)
if result != tt.expected {
t.Errorf("Expected %v, got %v", tt.expected, result)
}
})
}
}
func TestGetDurationEnv(t *testing.T) {
tests := []struct {
name string
value string
expected time.Duration
}{
{"valid duration", "45", 45 * time.Second},
{"zero duration", "0", 0},
{"invalid duration", "invalid", 30 * time.Second},
{"empty value", "", 30 * time.Second},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
os.Clearenv()
if tt.value != "" {
t.Setenv("TEST_DURATION", tt.value)
}
result := getDurationEnv("TEST_DURATION", 30*time.Second)
if result != tt.expected {
t.Errorf("Expected %v, got %v", tt.expected, result)
}
})
}
}

View File

@ -1,200 +0,0 @@
package exporters
import (
"path/filepath"
"testing"
"github.com/kjanat/articulate-parser/internal/models"
"github.com/kjanat/articulate-parser/internal/services"
)
// BenchmarkFactory_CreateExporter_Markdown benchmarks markdown exporter creation.
func BenchmarkFactory_CreateExporter_Markdown(b *testing.B) {
htmlCleaner := services.NewHTMLCleaner()
factory := NewFactory(htmlCleaner)
b.ResetTimer()
for b.Loop() {
_, _ = factory.CreateExporter("markdown")
}
}
// BenchmarkFactory_CreateExporter_All benchmarks creating all exporter types.
func BenchmarkFactory_CreateExporter_All(b *testing.B) {
htmlCleaner := services.NewHTMLCleaner()
factory := NewFactory(htmlCleaner)
formats := []string{"markdown", "docx", "html"}
b.ResetTimer()
for b.Loop() {
for _, format := range formats {
_, _ = factory.CreateExporter(format)
}
}
}
// BenchmarkAllExporters_Export benchmarks all exporters with the same course.
func BenchmarkAllExporters_Export(b *testing.B) {
htmlCleaner := services.NewHTMLCleaner()
course := createBenchmarkCourse()
exporters := map[string]struct {
exporter any
ext string
}{
"Markdown": {NewMarkdownExporter(htmlCleaner), ".md"},
"Docx": {NewDocxExporter(htmlCleaner), ".docx"},
"HTML": {NewHTMLExporter(htmlCleaner), ".html"},
}
for name, exp := range exporters {
b.Run(name, func(b *testing.B) {
tempDir := b.TempDir()
exporter := exp.exporter.(interface {
Export(*models.Course, string) error
})
b.ResetTimer()
for b.Loop() {
outputPath := filepath.Join(tempDir, "benchmark"+exp.ext)
_ = exporter.Export(course, outputPath)
}
})
}
}
// BenchmarkExporters_LargeCourse benchmarks exporters with large course data.
func BenchmarkExporters_LargeCourse(b *testing.B) {
htmlCleaner := services.NewHTMLCleaner()
course := createLargeBenchmarkCourse()
b.Run("Markdown_Large", func(b *testing.B) {
exporter := NewMarkdownExporter(htmlCleaner)
tempDir := b.TempDir()
b.ResetTimer()
for b.Loop() {
outputPath := filepath.Join(tempDir, "large.md")
_ = exporter.Export(course, outputPath)
}
})
b.Run("Docx_Large", func(b *testing.B) {
exporter := NewDocxExporter(htmlCleaner)
tempDir := b.TempDir()
b.ResetTimer()
for b.Loop() {
outputPath := filepath.Join(tempDir, "large.docx")
_ = exporter.Export(course, outputPath)
}
})
b.Run("HTML_Large", func(b *testing.B) {
exporter := NewHTMLExporter(htmlCleaner)
tempDir := b.TempDir()
b.ResetTimer()
for b.Loop() {
outputPath := filepath.Join(tempDir, "large.html")
_ = exporter.Export(course, outputPath)
}
})
}
// createBenchmarkCourse creates a standard-sized course for benchmarking.
func createBenchmarkCourse() *models.Course {
return &models.Course{
ShareID: "benchmark-id",
Author: "Benchmark Author",
Course: models.CourseInfo{
ID: "bench-course",
Title: "Benchmark Course",
Description: "Performance testing course",
NavigationMode: "menu",
Lessons: []models.Lesson{
{
ID: "lesson1",
Title: "Introduction",
Type: "lesson",
Items: []models.Item{
{
Type: "text",
Items: []models.SubItem{
{
Heading: "Welcome",
Paragraph: "<p>This is a test paragraph with <strong>HTML</strong> content.</p>",
},
},
},
{
Type: "list",
Items: []models.SubItem{
{Paragraph: "Item 1"},
{Paragraph: "Item 2"},
{Paragraph: "Item 3"},
},
},
},
},
},
},
}
}
// createLargeBenchmarkCourse creates a large course for stress testing.
func createLargeBenchmarkCourse() *models.Course {
lessons := make([]models.Lesson, 50)
for i := range 50 {
lessons[i] = models.Lesson{
ID: string(rune(i)),
Title: "Lesson " + string(rune(i)),
Type: "lesson",
Description: "<p>This is lesson description with <em>formatting</em>.</p>",
Items: []models.Item{
{
Type: "text",
Items: []models.SubItem{
{
Heading: "Section Heading",
Paragraph: "<p>Content with <strong>bold</strong> and <em>italic</em> text.</p>",
},
},
},
{
Type: "list",
Items: []models.SubItem{
{Paragraph: "Point 1"},
{Paragraph: "Point 2"},
{Paragraph: "Point 3"},
},
},
{
Type: "knowledgeCheck",
Items: []models.SubItem{
{
Title: "Quiz Question",
Answers: []models.Answer{
{Title: "Answer A", Correct: false},
{Title: "Answer B", Correct: true},
{Title: "Answer C", Correct: false},
},
Feedback: "Good job!",
},
},
},
},
}
}
return &models.Course{
ShareID: "large-benchmark-id",
Author: "Benchmark Author",
Course: models.CourseInfo{
ID: "large-bench-course",
Title: "Large Benchmark Course",
Description: "Large performance testing course",
Lessons: lessons,
},
}
}
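These benchmarks need only the standard Go tooling; assuming the package sits at internal/exporters as the import paths suggest, they can be run with:

go test -bench=. -benchmem ./internal/exporters/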

View File

@ -8,9 +8,6 @@ import (
  "strings"
  "github.com/fumiama/go-docx"
- "golang.org/x/text/cases"
- "golang.org/x/text/language"
  "github.com/kjanat/articulate-parser/internal/interfaces"
  "github.com/kjanat/articulate-parser/internal/models"
  "github.com/kjanat/articulate-parser/internal/services"

@ -69,24 +66,15 @@ func (e *DocxExporter) Export(course *models.Course, outputPath string) error {
  // Ensure output directory exists and add .docx extension
  if !strings.HasSuffix(strings.ToLower(outputPath), ".docx") {
- outputPath += ".docx"
+ outputPath = outputPath + ".docx"
  }
  // Create the file
- // #nosec G304 - Output path is provided by user via CLI argument, which is expected behavior
  file, err := os.Create(outputPath)
  if err != nil {
  return fmt.Errorf("failed to create output file: %w", err)
  }
- // Ensure file is closed even if WriteTo fails. Close errors are logged but not
- // fatal since the document content has already been written to disk. A close
- // error typically indicates a filesystem synchronization issue that doesn't
- // affect the validity of the exported file.
- defer func() {
- if err := file.Close(); err != nil {
- fmt.Fprintf(os.Stderr, "warning: failed to close output file: %v\n", err)
- }
- }()
+ defer file.Close()
  // Save the document
  _, err = doc.WriteTo(file)

@ -131,8 +119,7 @@ func (e *DocxExporter) exportItem(doc *docx.Docx, item *models.Item) {
  // Add item type as heading
  if item.Type != "" {
  itemPara := doc.AddParagraph()
- caser := cases.Title(language.English)
- itemPara.AddText(caser.String(item.Type)).Size("24").Bold()
+ itemPara.AddText(strings.Title(item.Type)).Size("24").Bold()
  }
  // Add sub-items

@ -193,10 +180,10 @@ func (e *DocxExporter) exportSubItem(doc *docx.Docx, subItem *models.SubItem) {
  }
  }
- // SupportedFormat returns the format name this exporter supports.
+ // GetSupportedFormat returns the format name this exporter supports.
  //
  // Returns:
  // - A string representing the supported format ("docx")
- func (e *DocxExporter) SupportedFormat() string {
- return FormatDocx
+ func (e *DocxExporter) GetSupportedFormat() string {
+ return "docx"
  }
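A short sketch of the extension handling above; course is assumed to be an already-parsed *models.Course, and the surrounding imports match the Example functions further down.

// Sketch: both calls end up writing Word documents.
cleaner := services.NewHTMLCleaner()
exporter := exporters.NewDocxExporter(cleaner)
_ = exporter.Export(course, "report")      // no suffix: written as report.docx
_ = exporter.Export(course, "report.DOCX") // suffix check is case-insensitive, path kept as-is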

View File

@ -1,3 +1,4 @@
+ // Package exporters_test provides tests for the docx exporter.
  package exporters
  import (

@ -29,13 +30,13 @@ func TestNewDocxExporter(t *testing.T) {
  }
  }
- // TestDocxExporter_SupportedFormat tests the SupportedFormat method.
- func TestDocxExporter_SupportedFormat(t *testing.T) {
+ // TestDocxExporter_GetSupportedFormat tests the GetSupportedFormat method.
+ func TestDocxExporter_GetSupportedFormat(t *testing.T) {
  htmlCleaner := services.NewHTMLCleaner()
  exporter := NewDocxExporter(htmlCleaner)
  expected := "docx"
- result := exporter.SupportedFormat()
+ result := exporter.GetSupportedFormat()
  if result != expected {
  t.Errorf("Expected format '%s', got '%s'", expected, result)

@ -89,6 +90,7 @@ func TestDocxExporter_Export_AddDocxExtension(t *testing.T) {
  err := exporter.Export(testCourse, outputPath)
  if err != nil {
  t.Fatalf("Export failed: %v", err)
  }

@ -153,6 +155,7 @@ func TestDocxExporter_ExportLesson(t *testing.T) {
  err := exporter.Export(course, outputPath)
  if err != nil {
  t.Fatalf("Export failed: %v", err)
  }

@ -219,6 +222,7 @@ func TestDocxExporter_ExportItem(t *testing.T) {
  err := exporter.Export(course, outputPath)
  if err != nil {
  t.Fatalf("Export failed: %v", err)
  }

@ -272,6 +276,7 @@ func TestDocxExporter_ExportSubItem(t *testing.T) {
  err := exporter.Export(course, outputPath)
  if err != nil {
  t.Fatalf("Export failed: %v", err)
  }

@ -330,7 +335,7 @@ func TestDocxExporter_ComplexCourse(t *testing.T) {
  Caption: "<p>Watch this introductory video</p>",
  Media: &models.Media{
  Video: &models.VideoMedia{
- OriginalURL: "https://example.com/intro.mp4",
+ OriginalUrl: "https://example.com/intro.mp4",
  Duration: 300,
  },
  },

@ -358,7 +363,7 @@ func TestDocxExporter_ComplexCourse(t *testing.T) {
  Caption: "<p>Course overview diagram</p>",
  Media: &models.Media{
  Image: &models.ImageMedia{
- OriginalURL: "https://example.com/overview.png",
+ OriginalUrl: "https://example.com/overview.png",
  },
  },
  },

@ -404,6 +409,7 @@ func TestDocxExporter_ComplexCourse(t *testing.T) {
  // Export course
  err := exporter.Export(course, outputPath)
  if err != nil {
  t.Fatalf("Export failed: %v", err)
  }

@ -438,6 +444,7 @@ func TestDocxExporter_EmptyCourse(t *testing.T) {
  err := exporter.Export(course, outputPath)
  if err != nil {
  t.Fatalf("Export failed: %v", err)
  }

@ -486,6 +493,7 @@ func TestDocxExporter_HTMLCleaning(t *testing.T) {
  err := exporter.Export(course, outputPath)
  if err != nil {
  t.Fatalf("Export failed: %v", err)
  }

@ -508,6 +516,7 @@ func TestDocxExporter_ExistingDocxExtension(t *testing.T) {
  err := exporter.Export(testCourse, outputPath)
  if err != nil {
  t.Fatalf("Export failed: %v", err)
  }

@ -543,6 +552,7 @@ func TestDocxExporter_CaseInsensitiveExtension(t *testing.T) {
  err := exporter.Export(testCourse, outputPath)
  if err != nil {
  t.Fatalf("Export failed for case %d (%s): %v", i, testCase, err)
  }

@ -605,13 +615,12 @@ func BenchmarkDocxExporter_Export(b *testing.B) {
  // Create temporary directory
  tempDir := b.TempDir()
- for b.Loop() {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
  outputPath := filepath.Join(tempDir, "benchmark-course.docx")
  _ = exporter.Export(course, outputPath)
- // Clean up for next iteration. Remove errors are ignored because we've already
- // benchmarked the export operation; cleanup failures don't affect the benchmark
- // measurements or the validity of the next iteration's export.
- _ = os.Remove(outputPath)
+ // Clean up for next iteration
+ os.Remove(outputPath)
  }
  }

@ -632,7 +641,7 @@ func BenchmarkDocxExporter_ComplexCourse(b *testing.B) {
  }
  // Fill with test data
- for i := range 10 {
+ for i := 0; i < 10; i++ {
  lesson := models.Lesson{
  ID: "lesson-" + string(rune(i)),
  Title: "Lesson " + string(rune(i)),

@ -640,13 +649,13 @@ func BenchmarkDocxExporter_ComplexCourse(b *testing.B) {
  Items: make([]models.Item, 5), // 5 items per lesson
  }
- for j := range 5 {
+ for j := 0; j < 5; j++ {
  item := models.Item{
  Type: "text",
  Items: make([]models.SubItem, 3), // 3 sub-items per item
  }
- for k := range 3 {
+ for k := 0; k < 3; k++ {
  item.Items[k] = models.SubItem{
  Heading: "<h3>Heading " + string(rune(k)) + "</h3>",
  Paragraph: "<p>Paragraph content with <strong>formatting</strong> for performance testing.</p>",

@ -661,11 +670,10 @@ func BenchmarkDocxExporter_ComplexCourse(b *testing.B) {
  tempDir := b.TempDir()
- for b.Loop() {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
  outputPath := filepath.Join(tempDir, "benchmark-complex.docx")
  _ = exporter.Export(course, outputPath)
- // Remove errors are ignored because we're only benchmarking the export
- // operation itself; cleanup failures don't affect the benchmark metrics.
- _ = os.Remove(outputPath)
+ os.Remove(outputPath)
  }
  }

View File

@ -1,101 +0,0 @@
// Package exporters_test provides examples for the exporters package.
package exporters_test
import (
"fmt"
"log"
"github.com/kjanat/articulate-parser/internal/exporters"
"github.com/kjanat/articulate-parser/internal/models"
"github.com/kjanat/articulate-parser/internal/services"
)
// ExampleNewFactory demonstrates creating an exporter factory.
func ExampleNewFactory() {
htmlCleaner := services.NewHTMLCleaner()
factory := exporters.NewFactory(htmlCleaner)
// Get supported formats
formats := factory.SupportedFormats()
fmt.Printf("Supported formats: %d\n", len(formats))
// Output: Supported formats: 6
}
// ExampleFactory_CreateExporter demonstrates creating exporters.
func ExampleFactory_CreateExporter() {
htmlCleaner := services.NewHTMLCleaner()
factory := exporters.NewFactory(htmlCleaner)
// Create a markdown exporter
exporter, err := factory.CreateExporter("markdown")
if err != nil {
log.Fatal(err)
}
fmt.Printf("Created: %s exporter\n", exporter.SupportedFormat())
// Output: Created: markdown exporter
}
// ExampleFactory_CreateExporter_caseInsensitive demonstrates case-insensitive format names.
func ExampleFactory_CreateExporter_caseInsensitive() {
htmlCleaner := services.NewHTMLCleaner()
factory := exporters.NewFactory(htmlCleaner)
// All these work (case-insensitive)
formats := []string{"MARKDOWN", "Markdown", "markdown", "MD"}
for _, format := range formats {
exporter, _ := factory.CreateExporter(format)
fmt.Printf("%s -> %s\n", format, exporter.SupportedFormat())
}
// Output:
// MARKDOWN -> markdown
// Markdown -> markdown
// markdown -> markdown
// MD -> markdown
}
// ExampleMarkdownExporter_Export demonstrates exporting to Markdown.
func ExampleMarkdownExporter_Export() {
htmlCleaner := services.NewHTMLCleaner()
exporter := exporters.NewMarkdownExporter(htmlCleaner)
course := &models.Course{
ShareID: "example-id",
Course: models.CourseInfo{
Title: "Example Course",
Description: "<p>Course description</p>",
},
}
// Export to markdown file
err := exporter.Export(course, "output.md")
if err != nil {
log.Fatal(err)
}
fmt.Println("Export complete")
// Output: Export complete
}
// ExampleDocxExporter_Export demonstrates exporting to DOCX.
func ExampleDocxExporter_Export() {
htmlCleaner := services.NewHTMLCleaner()
exporter := exporters.NewDocxExporter(htmlCleaner)
course := &models.Course{
ShareID: "example-id",
Course: models.CourseInfo{
Title: "Example Course",
},
}
// Export to Word document
err := exporter.Export(course, "output.docx")
if err != nil {
log.Fatal(err)
}
fmt.Println("DOCX export complete")
// Output: DOCX export complete
}

View File

@ -1,3 +1,5 @@
+ // Package exporters provides implementations of the Exporter interface
+ // for converting Articulate Rise courses into various file formats.
  package exporters
  import (

@ -8,13 +10,6 @@ import (
  "github.com/kjanat/articulate-parser/internal/services"
  )
- // Format constants for supported export formats.
- const (
- FormatMarkdown = "markdown"
- FormatDocx = "docx"
- FormatHTML = "html"
- )
  // Factory implements the ExporterFactory interface.
  // It creates appropriate exporter instances based on the requested format.
  type Factory struct {

@ -38,22 +33,33 @@ func NewFactory(htmlCleaner *services.HTMLCleaner) interfaces.ExporterFactory {
  }
  // CreateExporter creates an exporter for the specified format.
- // Format strings are case-insensitive (e.g., "markdown", "DOCX").
+ // It returns an appropriate exporter implementation based on the format string.
+ // Format strings are case-insensitive.
+ //
+ // Parameters:
+ // - format: The desired export format (e.g., "markdown", "docx")
+ //
+ // Returns:
+ // - An implementation of the Exporter interface if the format is supported
+ // - An error if the format is not supported
  func (f *Factory) CreateExporter(format string) (interfaces.Exporter, error) {
  switch strings.ToLower(format) {
- case FormatMarkdown, "md":
+ case "markdown", "md":
  return NewMarkdownExporter(f.htmlCleaner), nil
- case FormatDocx, "word":
+ case "docx", "word":
  return NewDocxExporter(f.htmlCleaner), nil
- case FormatHTML, "htm":
+ case "html", "htm":
  return NewHTMLExporter(f.htmlCleaner), nil
  default:
  return nil, fmt.Errorf("unsupported export format: %s", format)
  }
  }
- // SupportedFormats returns a list of all supported export formats,
- // including both primary format names and their aliases.
- func (f *Factory) SupportedFormats() []string {
- return []string{FormatMarkdown, "md", FormatDocx, "word", FormatHTML, "htm"}
+ // GetSupportedFormats returns a list of all supported export formats.
+ // This includes both primary format names and their aliases.
+ //
+ // Returns:
+ // - A string slice containing all supported format names
+ func (f *Factory) GetSupportedFormats() []string {
+ return []string{"markdown", "md", "docx", "word", "html", "htm"}
  }
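The one branch the Example functions shown earlier don't exercise is the error path; a minimal sketch, assuming the same imports as those examples:

factory := exporters.NewFactory(services.NewHTMLCleaner())
if _, err := factory.CreateExporter("pdf"); err != nil {
	fmt.Println(err) // unsupported export format: pdf
}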

View File

@ -1,3 +1,4 @@
+ // Package exporters_test provides tests for the exporter factory.
  package exporters
  import (

@ -124,7 +125,7 @@ func TestFactory_CreateExporter(t *testing.T) {
  }
  // Check supported format
- supportedFormat := exporter.SupportedFormat()
+ supportedFormat := exporter.GetSupportedFormat()
  if supportedFormat != tc.expectedFormat {
  t.Errorf("Expected supported format '%s' for format '%s', got '%s'", tc.expectedFormat, tc.format, supportedFormat)
  }

@ -163,6 +164,7 @@ func TestFactory_CreateExporter_CaseInsensitive(t *testing.T) {
  for _, tc := range testCases {
  t.Run(tc.format, func(t *testing.T) {
  exporter, err := factory.CreateExporter(tc.format)
  if err != nil {
  t.Fatalf("Unexpected error for format '%s': %v", tc.format, err)
  }

@ -171,7 +173,7 @@ func TestFactory_CreateExporter_CaseInsensitive(t *testing.T) {
  t.Fatalf("CreateExporter returned nil for format '%s'", tc.format)
  }
- supportedFormat := exporter.SupportedFormat()
+ supportedFormat := exporter.GetSupportedFormat()
  if supportedFormat != tc.expectedFormat {
  t.Errorf("Expected supported format '%s' for format '%s', got '%s'", tc.expectedFormat, tc.format, supportedFormat)
  }

@ -219,15 +221,15 @@ func TestFactory_CreateExporter_ErrorMessages(t *testing.T) {
  }
  }
- // TestFactory_SupportedFormats tests the SupportedFormats method.
- func TestFactory_SupportedFormats(t *testing.T) {
+ // TestFactory_GetSupportedFormats tests the GetSupportedFormats method.
+ func TestFactory_GetSupportedFormats(t *testing.T) {
  htmlCleaner := services.NewHTMLCleaner()
  factory := NewFactory(htmlCleaner)
- formats := factory.SupportedFormats()
+ formats := factory.GetSupportedFormats()
  if formats == nil {
- t.Fatal("SupportedFormats() returned nil")
+ t.Fatal("GetSupportedFormats() returned nil")
  }
  expected := []string{"markdown", "md", "docx", "word", "html", "htm"}

@ -244,22 +246,22 @@ func TestFactory_SupportedFormats(t *testing.T) {
  for _, format := range formats {
  exporter, err := factory.CreateExporter(format)
  if err != nil {
- t.Errorf("Format '%s' from SupportedFormats() should be creatable, got error: %v", format, err)
+ t.Errorf("Format '%s' from GetSupportedFormats() should be creatable, got error: %v", format, err)
  }
  if exporter == nil {
- t.Errorf("Format '%s' from SupportedFormats() should create non-nil exporter", format)
+ t.Errorf("Format '%s' from GetSupportedFormats() should create non-nil exporter", format)
  }
  }
  }
- // TestFactory_SupportedFormats_Immutable tests that the returned slice is safe to modify.
- func TestFactory_SupportedFormats_Immutable(t *testing.T) {
+ // TestFactory_GetSupportedFormats_Immutable tests that the returned slice is safe to modify.
+ func TestFactory_GetSupportedFormats_Immutable(t *testing.T) {
  htmlCleaner := services.NewHTMLCleaner()
  factory := NewFactory(htmlCleaner)
  // Get formats twice
- formats1 := factory.SupportedFormats()
- formats2 := factory.SupportedFormats()
+ formats1 := factory.GetSupportedFormats()
+ formats2 := factory.GetSupportedFormats()
  // Modify first slice
  if len(formats1) > 0 {

@ -268,13 +270,13 @@ func TestFactory_SupportedFormats_Immutable(t *testing.T) {
  // Check that second call returns unmodified data
  if len(formats2) > 0 && formats2[0] == "modified" {
- t.Error("SupportedFormats() should return independent slices")
+ t.Error("GetSupportedFormats() should return independent slices")
  }
  // Verify original functionality still works
- formats3 := factory.SupportedFormats()
+ formats3 := factory.GetSupportedFormats()
  if len(formats3) == 0 {
- t.Error("SupportedFormats() should still return formats after modification")
+ t.Error("GetSupportedFormats() should still return formats after modification")
  }
  }

@ -434,7 +436,7 @@ func TestFactory_FormatNormalization(t *testing.T) {
  t.Fatalf("Failed to create exporter for '%s': %v", tc.input, err)
  }
- format := exporter.SupportedFormat()
+ format := exporter.GetSupportedFormat()
  if format != tc.expected {
  t.Errorf("Expected format '%s' for input '%s', got '%s'", tc.expected, tc.input, format)
  }

@ -447,7 +449,8 @@ func BenchmarkFactory_CreateExporter(b *testing.B) {
  htmlCleaner := services.NewHTMLCleaner()
  factory := NewFactory(htmlCleaner)
- for b.Loop() {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
  _, _ = factory.CreateExporter("markdown")
  }
  }

@ -457,17 +460,19 @@ func BenchmarkFactory_CreateExporter_Docx(b *testing.B) {
  htmlCleaner := services.NewHTMLCleaner()
  factory := NewFactory(htmlCleaner)
- for b.Loop() {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
  _, _ = factory.CreateExporter("docx")
  }
  }
- // BenchmarkFactory_SupportedFormats benchmarks the SupportedFormats method.
- func BenchmarkFactory_SupportedFormats(b *testing.B) {
+ // BenchmarkFactory_GetSupportedFormats benchmarks the GetSupportedFormats method.
+ func BenchmarkFactory_GetSupportedFormats(b *testing.B) {
  htmlCleaner := services.NewHTMLCleaner()
  factory := NewFactory(htmlCleaner)
- for b.Loop() {
- _ = factory.SupportedFormats()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = factory.GetSupportedFormats()
  }
  }

View File

@ -1,30 +1,24 @@
// Package exporters provides implementations of the Exporter interface
// for converting Articulate Rise courses into various file formats.
package exporters package exporters
import ( import (
_ "embed" "bytes"
"fmt" "fmt"
"html/template" "html"
"io"
"os" "os"
"strings"
"github.com/kjanat/articulate-parser/internal/interfaces" "github.com/kjanat/articulate-parser/internal/interfaces"
"github.com/kjanat/articulate-parser/internal/models" "github.com/kjanat/articulate-parser/internal/models"
"github.com/kjanat/articulate-parser/internal/services" "github.com/kjanat/articulate-parser/internal/services"
) )
//go:embed html_styles.css
var defaultCSS string
//go:embed html_template.html
var htmlTemplate string
// HTMLExporter implements the Exporter interface for HTML format. // HTMLExporter implements the Exporter interface for HTML format.
// It converts Articulate Rise course data into a structured HTML document using templates. // It converts Articulate Rise course data into a structured HTML document.
type HTMLExporter struct { type HTMLExporter struct {
// htmlCleaner is used to convert HTML content to plain text when needed // htmlCleaner is used to convert HTML content to plain text when needed
htmlCleaner *services.HTMLCleaner htmlCleaner *services.HTMLCleaner
// tmpl holds the parsed HTML template
tmpl *template.Template
} }
// NewHTMLExporter creates a new HTMLExporter instance. // NewHTMLExporter creates a new HTMLExporter instance.
@ -36,21 +30,8 @@ type HTMLExporter struct {
// Returns: // Returns:
// - An implementation of the Exporter interface for HTML format // - An implementation of the Exporter interface for HTML format
func NewHTMLExporter(htmlCleaner *services.HTMLCleaner) interfaces.Exporter { func NewHTMLExporter(htmlCleaner *services.HTMLCleaner) interfaces.Exporter {
// Parse the template with custom functions
funcMap := template.FuncMap{
"safeHTML": func(s string) template.HTML {
return template.HTML(s) // #nosec G203 - HTML content is from trusted course data
},
"safeCSS": func(s string) template.CSS {
return template.CSS(s) // #nosec G203 - CSS content is from trusted embedded file
},
}
tmpl := template.Must(template.New("html").Funcs(funcMap).Parse(htmlTemplate))
return &HTMLExporter{ return &HTMLExporter{
htmlCleaner: htmlCleaner, htmlCleaner: htmlCleaner,
tmpl: tmpl,
} }
} }
@ -65,41 +46,431 @@ func NewHTMLExporter(htmlCleaner *services.HTMLCleaner) interfaces.Exporter {
  // Returns:
  // - An error if writing to the output file fails
  func (e *HTMLExporter) Export(course *models.Course, outputPath string) error {
- f, err := os.Create(outputPath)
- if err != nil {
- return fmt.Errorf("failed to create file: %w", err)
- }
- defer f.Close()
- return e.WriteHTML(f, course)
- }
- // WriteHTML writes the HTML content to an io.Writer.
- // This allows for better testability and flexibility in output destinations.
- //
- // Parameters:
- // - w: The writer to output HTML content to
- // - course: The course data model to export
- //
- // Returns:
- // - An error if writing fails
- func (e *HTMLExporter) WriteHTML(w io.Writer, course *models.Course) error {
- // Prepare template data
- data := prepareTemplateData(course, e.htmlCleaner)
- // Execute template
- if err := e.tmpl.Execute(w, data); err != nil {
- return fmt.Errorf("failed to execute template: %w", err)
- }
- return nil
- }
- // SupportedFormat returns the format name this exporter supports
+ var buf bytes.Buffer
+ // Write HTML document structure
+ buf.WriteString("<!DOCTYPE html>\n")
+ buf.WriteString("<html lang=\"en\">\n")
+ buf.WriteString("<head>\n")
+ buf.WriteString(" <meta charset=\"UTF-8\">\n")
+ buf.WriteString(" <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n")
+ buf.WriteString(fmt.Sprintf(" <title>%s</title>\n", html.EscapeString(course.Course.Title)))
+ buf.WriteString(" <style>\n")
+ buf.WriteString(e.getDefaultCSS())
+ buf.WriteString(" </style>\n")
+ buf.WriteString("</head>\n")
+ buf.WriteString("<body>\n")
+ // Write course header
+ buf.WriteString(fmt.Sprintf(" <header>\n <h1>%s</h1>\n", html.EscapeString(course.Course.Title)))
+ if course.Course.Description != "" {
+ buf.WriteString(fmt.Sprintf(" <div class=\"course-description\">%s</div>\n", course.Course.Description))
+ }
+ buf.WriteString(" </header>\n\n")
+ // Add metadata section
+ buf.WriteString(" <section class=\"course-info\">\n")
+ buf.WriteString(" <h2>Course Information</h2>\n")
+ buf.WriteString(" <ul>\n")
+ buf.WriteString(fmt.Sprintf(" <li><strong>Course ID:</strong> %s</li>\n", html.EscapeString(course.Course.ID)))
+ buf.WriteString(fmt.Sprintf(" <li><strong>Share ID:</strong> %s</li>\n", html.EscapeString(course.ShareID)))
+ buf.WriteString(fmt.Sprintf(" <li><strong>Navigation Mode:</strong> %s</li>\n", html.EscapeString(course.Course.NavigationMode)))
+ if course.Course.ExportSettings != nil {
+ buf.WriteString(fmt.Sprintf(" <li><strong>Export Format:</strong> %s</li>\n", html.EscapeString(course.Course.ExportSettings.Format)))
+ }
+ buf.WriteString(" </ul>\n")
+ buf.WriteString(" </section>\n\n")
+ // Process lessons
+ lessonCounter := 0
+ for _, lesson := range course.Course.Lessons {
+ if lesson.Type == "section" {
+ buf.WriteString(fmt.Sprintf(" <section class=\"course-section\">\n <h2>%s</h2>\n </section>\n\n", html.EscapeString(lesson.Title)))
+ continue
+ }
+ lessonCounter++
+ buf.WriteString(fmt.Sprintf(" <section class=\"lesson\">\n <h3>Lesson %d: %s</h3>\n", lessonCounter, html.EscapeString(lesson.Title)))
+ if lesson.Description != "" {
+ buf.WriteString(fmt.Sprintf(" <div class=\"lesson-description\">%s</div>\n", lesson.Description))
+ }
+ // Process lesson items
+ for _, item := range lesson.Items {
+ e.processItemToHTML(&buf, item)
+ }
+ buf.WriteString(" </section>\n\n")
+ }
+ buf.WriteString("</body>\n")
+ buf.WriteString("</html>\n")
+ return os.WriteFile(outputPath, buf.Bytes(), 0644)
+ }
+ // GetSupportedFormat returns the format name this exporter supports
  // It indicates the file format that the HTMLExporter can generate.
  //
  // Returns:
  // - A string representing the supported format ("html")
- func (e *HTMLExporter) SupportedFormat() string {
- return FormatHTML
+ func (e *HTMLExporter) GetSupportedFormat() string {
+ return "html"
  }
// getDefaultCSS returns basic CSS styling for the HTML document
func (e *HTMLExporter) getDefaultCSS() string {
return `
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif;
line-height: 1.6;
color: #333;
max-width: 800px;
margin: 0 auto;
padding: 20px;
background-color: #f9f9f9;
}
header {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
color: white;
padding: 2rem;
border-radius: 10px;
margin-bottom: 2rem;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}
header h1 {
margin: 0;
font-size: 2.5rem;
font-weight: 300;
}
.course-description {
margin-top: 1rem;
font-size: 1.1rem;
opacity: 0.9;
}
.course-info {
background: white;
padding: 1.5rem;
border-radius: 8px;
margin-bottom: 2rem;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
}
.course-info h2 {
margin-top: 0;
color: #4a5568;
border-bottom: 2px solid #e2e8f0;
padding-bottom: 0.5rem;
}
.course-info ul {
list-style: none;
padding: 0;
}
.course-info li {
margin: 0.5rem 0;
padding: 0.5rem;
background: #f7fafc;
border-radius: 4px;
}
.course-section {
background: #4299e1;
color: white;
padding: 1.5rem;
border-radius: 8px;
margin: 2rem 0;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
}
.course-section h2 {
margin: 0;
font-weight: 400;
}
.lesson {
background: white;
padding: 2rem;
border-radius: 8px;
margin: 2rem 0;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
border-left: 4px solid #4299e1;
}
.lesson h3 {
margin-top: 0;
color: #2d3748;
font-size: 1.5rem;
}
.lesson-description {
margin: 1rem 0;
padding: 1rem;
background: #f7fafc;
border-radius: 4px;
border-left: 3px solid #4299e1;
}
.item {
margin: 1.5rem 0;
padding: 1rem;
border-radius: 6px;
background: #fafafa;
border: 1px solid #e2e8f0;
}
.item h4 {
margin-top: 0;
color: #4a5568;
font-size: 1.2rem;
text-transform: capitalize;
}
.text-item {
background: #f0fff4;
border-left: 3px solid #48bb78;
}
.list-item {
background: #fffaf0;
border-left: 3px solid #ed8936;
}
.knowledge-check {
background: #e6fffa;
border-left: 3px solid #38b2ac;
}
.multimedia-item {
background: #faf5ff;
border-left: 3px solid #9f7aea;
}
.interactive-item {
background: #fff5f5;
border-left: 3px solid #f56565;
}
.unknown-item {
background: #f7fafc;
border-left: 3px solid #a0aec0;
}
.answers {
margin: 1rem 0;
}
.answers h5 {
margin: 0.5rem 0;
color: #4a5568;
}
.answers ol {
margin: 0.5rem 0;
padding-left: 1.5rem;
}
.answers li {
margin: 0.3rem 0;
padding: 0.3rem;
}
.correct-answer {
background: #c6f6d5;
border-radius: 3px;
font-weight: bold;
}
.correct-answer::after {
content: " ✓";
color: #38a169;
}
.feedback {
margin: 1rem 0;
padding: 1rem;
background: #edf2f7;
border-radius: 4px;
border-left: 3px solid #4299e1;
font-style: italic;
}
.media-info {
background: #edf2f7;
padding: 1rem;
border-radius: 4px;
margin: 0.5rem 0;
}
.media-info strong {
color: #4a5568;
}
hr {
border: none;
height: 2px;
background: linear-gradient(to right, #667eea, #764ba2);
margin: 2rem 0;
border-radius: 1px;
}
ul {
padding-left: 1.5rem;
}
li {
margin: 0.5rem 0;
}
`
}
// processItemToHTML converts a course item into HTML format
// and appends it to the provided buffer. It handles different item types
// with appropriate HTML formatting.
//
// Parameters:
// - buf: The buffer to write the HTML content to
// - item: The course item to process
func (e *HTMLExporter) processItemToHTML(buf *bytes.Buffer, item models.Item) {
switch strings.ToLower(item.Type) {
case "text":
e.processTextItem(buf, item)
case "list":
e.processListItem(buf, item)
case "knowledgecheck":
e.processKnowledgeCheckItem(buf, item)
case "multimedia":
e.processMultimediaItem(buf, item)
case "image":
e.processImageItem(buf, item)
case "interactive":
e.processInteractiveItem(buf, item)
case "divider":
e.processDividerItem(buf)
default:
e.processUnknownItem(buf, item)
}
}
// processTextItem handles text content with headings and paragraphs
func (e *HTMLExporter) processTextItem(buf *bytes.Buffer, item models.Item) {
buf.WriteString(" <div class=\"item text-item\">\n")
buf.WriteString(" <h4>Text Content</h4>\n")
for _, subItem := range item.Items {
if subItem.Heading != "" {
buf.WriteString(fmt.Sprintf(" <h5>%s</h5>\n", subItem.Heading))
}
if subItem.Paragraph != "" {
buf.WriteString(fmt.Sprintf(" <div>%s</div>\n", subItem.Paragraph))
}
}
buf.WriteString(" </div>\n\n")
}
// processListItem handles list content
func (e *HTMLExporter) processListItem(buf *bytes.Buffer, item models.Item) {
buf.WriteString(" <div class=\"item list-item\">\n")
buf.WriteString(" <h4>List</h4>\n")
buf.WriteString(" <ul>\n")
for _, subItem := range item.Items {
if subItem.Paragraph != "" {
cleanText := e.htmlCleaner.CleanHTML(subItem.Paragraph)
buf.WriteString(fmt.Sprintf(" <li>%s</li>\n", html.EscapeString(cleanText)))
}
}
buf.WriteString(" </ul>\n")
buf.WriteString(" </div>\n\n")
}
// processKnowledgeCheckItem handles quiz questions and answers
func (e *HTMLExporter) processKnowledgeCheckItem(buf *bytes.Buffer, item models.Item) {
buf.WriteString(" <div class=\"item knowledge-check\">\n")
buf.WriteString(" <h4>Knowledge Check</h4>\n")
for _, subItem := range item.Items {
if subItem.Title != "" {
buf.WriteString(fmt.Sprintf(" <p><strong>Question:</strong> %s</p>\n", subItem.Title))
}
if len(subItem.Answers) > 0 {
e.processAnswers(buf, subItem.Answers)
}
if subItem.Feedback != "" {
buf.WriteString(fmt.Sprintf(" <div class=\"feedback\"><strong>Feedback:</strong> %s</div>\n", subItem.Feedback))
}
}
buf.WriteString(" </div>\n\n")
}
// processMultimediaItem handles multimedia content like videos
func (e *HTMLExporter) processMultimediaItem(buf *bytes.Buffer, item models.Item) {
buf.WriteString(" <div class=\"item multimedia-item\">\n")
buf.WriteString(" <h4>Media Content</h4>\n")
for _, subItem := range item.Items {
if subItem.Title != "" {
buf.WriteString(fmt.Sprintf(" <h5>%s</h5>\n", subItem.Title))
}
if subItem.Media != nil {
if subItem.Media.Video != nil {
buf.WriteString(" <div class=\"media-info\">\n")
buf.WriteString(fmt.Sprintf(" <p><strong>Video:</strong> %s</p>\n", html.EscapeString(subItem.Media.Video.OriginalUrl)))
if subItem.Media.Video.Duration > 0 {
buf.WriteString(fmt.Sprintf(" <p><strong>Duration:</strong> %d seconds</p>\n", subItem.Media.Video.Duration))
}
buf.WriteString(" </div>\n")
}
}
if subItem.Caption != "" {
buf.WriteString(fmt.Sprintf(" <div><em>%s</em></div>\n", subItem.Caption))
}
}
buf.WriteString(" </div>\n\n")
}
// processImageItem handles image content
func (e *HTMLExporter) processImageItem(buf *bytes.Buffer, item models.Item) {
buf.WriteString(" <div class=\"item multimedia-item\">\n")
buf.WriteString(" <h4>Image</h4>\n")
for _, subItem := range item.Items {
if subItem.Media != nil && subItem.Media.Image != nil {
buf.WriteString(" <div class=\"media-info\">\n")
buf.WriteString(fmt.Sprintf(" <p><strong>Image:</strong> %s</p>\n", html.EscapeString(subItem.Media.Image.OriginalUrl)))
buf.WriteString(" </div>\n")
}
if subItem.Caption != "" {
buf.WriteString(fmt.Sprintf(" <div><em>%s</em></div>\n", subItem.Caption))
}
}
buf.WriteString(" </div>\n\n")
}
// processInteractiveItem handles interactive content
func (e *HTMLExporter) processInteractiveItem(buf *bytes.Buffer, item models.Item) {
buf.WriteString(" <div class=\"item interactive-item\">\n")
buf.WriteString(" <h4>Interactive Content</h4>\n")
for _, subItem := range item.Items {
if subItem.Title != "" {
buf.WriteString(fmt.Sprintf(" <p><strong>%s</strong></p>\n", subItem.Title))
}
if subItem.Paragraph != "" {
buf.WriteString(fmt.Sprintf(" <div>%s</div>\n", subItem.Paragraph))
}
}
buf.WriteString(" </div>\n\n")
}
// processDividerItem handles divider elements
func (e *HTMLExporter) processDividerItem(buf *bytes.Buffer) {
buf.WriteString(" <hr>\n\n")
}
// processUnknownItem handles unknown or unsupported item types
func (e *HTMLExporter) processUnknownItem(buf *bytes.Buffer, item models.Item) {
if len(item.Items) > 0 {
buf.WriteString(" <div class=\"item unknown-item\">\n")
buf.WriteString(fmt.Sprintf(" <h4>%s Content</h4>\n", strings.Title(item.Type)))
for _, subItem := range item.Items {
e.processGenericSubItem(buf, subItem)
}
buf.WriteString(" </div>\n\n")
}
}
// processGenericSubItem processes sub-items for unknown types
func (e *HTMLExporter) processGenericSubItem(buf *bytes.Buffer, subItem models.SubItem) {
if subItem.Title != "" {
buf.WriteString(fmt.Sprintf(" <p><strong>%s</strong></p>\n", subItem.Title))
}
if subItem.Paragraph != "" {
buf.WriteString(fmt.Sprintf(" <div>%s</div>\n", subItem.Paragraph))
}
}
// processAnswers processes answer choices for quiz questions
func (e *HTMLExporter) processAnswers(buf *bytes.Buffer, answers []models.Answer) {
buf.WriteString(" <div class=\"answers\">\n")
buf.WriteString(" <h5>Answers:</h5>\n")
buf.WriteString(" <ol>\n")
for _, answer := range answers {
cssClass := ""
if answer.Correct {
cssClass = " class=\"correct-answer\""
}
buf.WriteString(fmt.Sprintf(" <li%s>%s</li>\n", cssClass, html.EscapeString(answer.Title)))
}
buf.WriteString(" </ol>\n")
buf.WriteString(" </div>\n")
}
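For completeness, an HTML counterpart to the Markdown and DOCX Example functions shown earlier; a sketch only, mirroring their structure:

package main

import (
	"log"

	"github.com/kjanat/articulate-parser/internal/exporters"
	"github.com/kjanat/articulate-parser/internal/models"
	"github.com/kjanat/articulate-parser/internal/services"
)

func main() {
	exporter := exporters.NewHTMLExporter(services.NewHTMLCleaner())

	course := &models.Course{
		ShareID: "example-id",
		Course:  models.CourseInfo{Title: "Example Course"},
	}

	// Produces a single self-contained HTML document with the inline CSS from getDefaultCSS.
	if err := exporter.Export(course, "output.html"); err != nil {
		log.Fatal(err)
	}
}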

View File

@ -1,173 +0,0 @@
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif;
line-height: 1.6;
color: #333;
max-width: 800px;
margin: 0 auto;
padding: 20px;
background-color: #f9f9f9;
}
header {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
color: white;
padding: 2rem;
border-radius: 10px;
margin-bottom: 2rem;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}
header h1 {
margin: 0;
font-size: 2.5rem;
font-weight: 300;
}
.course-description {
margin-top: 1rem;
font-size: 1.1rem;
opacity: 0.9;
}
.course-info {
background: white;
padding: 1.5rem;
border-radius: 8px;
margin-bottom: 2rem;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
}
.course-info h2 {
margin-top: 0;
color: #4a5568;
border-bottom: 2px solid #e2e8f0;
padding-bottom: 0.5rem;
}
.course-info ul {
list-style: none;
padding: 0;
}
.course-info li {
margin: 0.5rem 0;
padding: 0.5rem;
background: #f7fafc;
border-radius: 4px;
}
.course-section {
background: #4299e1;
color: white;
padding: 1.5rem;
border-radius: 8px;
margin: 2rem 0;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
}
.course-section h2 {
margin: 0;
font-weight: 400;
}
.lesson {
background: white;
padding: 2rem;
border-radius: 8px;
margin: 2rem 0;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
border-left: 4px solid #4299e1;
}
.lesson h3 {
margin-top: 0;
color: #2d3748;
font-size: 1.5rem;
}
.lesson-description {
margin: 1rem 0;
padding: 1rem;
background: #f7fafc;
border-radius: 4px;
border-left: 3px solid #4299e1;
}
.item {
margin: 1.5rem 0;
padding: 1rem;
border-radius: 6px;
background: #fafafa;
border: 1px solid #e2e8f0;
}
.item h4 {
margin-top: 0;
color: #4a5568;
font-size: 1.2rem;
text-transform: capitalize;
}
.text-item {
background: #f0fff4;
border-left: 3px solid #48bb78;
}
.list-item {
background: #fffaf0;
border-left: 3px solid #ed8936;
}
.knowledge-check {
background: #e6fffa;
border-left: 3px solid #38b2ac;
}
.multimedia-item {
background: #faf5ff;
border-left: 3px solid #9f7aea;
}
.interactive-item {
background: #fff5f5;
border-left: 3px solid #f56565;
}
.unknown-item {
background: #f7fafc;
border-left: 3px solid #a0aec0;
}
.answers {
margin: 1rem 0;
}
.answers h5 {
margin: 0.5rem 0;
color: #4a5568;
}
.answers ol {
margin: 0.5rem 0;
padding-left: 1.5rem;
}
.answers li {
margin: 0.3rem 0;
padding: 0.3rem;
}
.correct-answer {
background: #c6f6d5;
border-radius: 3px;
font-weight: bold;
}
.correct-answer::after {
content: " ✓";
color: #38a169;
}
.feedback {
margin: 1rem 0;
padding: 1rem;
background: #edf2f7;
border-radius: 4px;
border-left: 3px solid #4299e1;
font-style: italic;
}
.media-info {
background: #edf2f7;
padding: 1rem;
border-radius: 4px;
margin: 0.5rem 0;
}
.media-info strong {
color: #4a5568;
}
hr {
border: none;
height: 2px;
background: linear-gradient(to right, #667eea, #764ba2);
margin: 2rem 0;
border-radius: 1px;
}
ul {
padding-left: 1.5rem;
}
li {
margin: 0.5rem 0;
}

View File

@ -1,183 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>{{.Course.Title}}</title>
<style>
{{safeCSS .CSS}}
</style>
</head>
<body>
<header>
<h1>{{.Course.Title}}</h1>
{{if .Course.Description}}
<div class="course-description">{{safeHTML .Course.Description}}</div>
{{end}}
</header>
<section class="course-info">
<h2>Course Information</h2>
<ul>
<li><strong>Course ID:</strong> {{.Course.ID}}</li>
<li><strong>Share ID:</strong> {{.ShareID}}</li>
<li><strong>Navigation Mode:</strong> {{.Course.NavigationMode}}</li>
{{if .Course.ExportSettings}}
<li><strong>Export Format:</strong> {{.Course.ExportSettings.Format}}</li>
{{end}}
</ul>
</section>
{{range .Sections}}
{{if eq .Type "section"}}
<section class="course-section">
<h2>{{.Title}}</h2>
</section>
{{else}}
<section class="lesson">
<h3>Lesson {{.Number}}: {{.Title}}</h3>
{{if .Description}}
<div class="lesson-description">{{safeHTML .Description}}</div>
{{end}}
{{range .Items}}
{{template "item" .}}
{{end}}
</section>
{{end}}
{{end}}
</body>
</html>
{{define "item"}}
{{if eq .Type "text"}}{{template "textItem" .}}
{{else if eq .Type "list"}}{{template "listItem" .}}
{{else if eq .Type "knowledgecheck"}}{{template "knowledgeCheckItem" .}}
{{else if eq .Type "multimedia"}}{{template "multimediaItem" .}}
{{else if eq .Type "image"}}{{template "imageItem" .}}
{{else if eq .Type "interactive"}}{{template "interactiveItem" .}}
{{else if eq .Type "divider"}}{{template "dividerItem" .}}
{{else}}{{template "unknownItem" .}}
{{end}}
{{end}}
{{define "textItem"}}
<div class="item text-item">
<h4>Text Content</h4>
{{range .Items}}
{{if .Heading}}
{{safeHTML .Heading}}
{{end}}
{{if .Paragraph}}
<div>{{safeHTML .Paragraph}}</div>
{{end}}
{{end}}
</div>
{{end}}
{{define "listItem"}}
<div class="item list-item">
<h4>List</h4>
<ul>
{{range .Items}}
{{if .Paragraph}}
<li>{{.CleanText}}</li>
{{end}}
{{end}}
</ul>
</div>
{{end}}
{{define "knowledgeCheckItem"}}
<div class="item knowledge-check">
<h4>Knowledge Check</h4>
{{range .Items}}
{{if .Title}}
<p><strong>Question:</strong> {{safeHTML .Title}}</p>
{{end}}
{{if .Answers}}
<div class="answers">
<h5>Answers:</h5>
<ol>
{{range .Answers}}
<li{{if .Correct}} class="correct-answer"{{end}}>{{.Title}}</li>
{{end}}
</ol>
</div>
{{end}}
{{if .Feedback}}
<div class="feedback"><strong>Feedback:</strong> {{safeHTML .Feedback}}</div>
{{end}}
{{end}}
</div>
{{end}}
{{define "multimediaItem"}}
<div class="item multimedia-item">
<h4>Media Content</h4>
{{range .Items}}
{{if .Title}}
<h5>{{.Title}}</h5>
{{end}}
{{if .Media}}
{{if .Media.Video}}
<div class="media-info">
<p><strong>Video:</strong> {{.Media.Video.OriginalURL}}</p>
{{if gt .Media.Video.Duration 0}}
<p><strong>Duration:</strong> {{.Media.Video.Duration}} seconds</p>
{{end}}
</div>
{{end}}
{{end}}
{{if .Caption}}
<div><em>{{.Caption}}</em></div>
{{end}}
{{end}}
</div>
{{end}}
{{define "imageItem"}}
<div class="item multimedia-item">
<h4>Image</h4>
{{range .Items}}
{{if and .Media .Media.Image}}
<div class="media-info">
<p><strong>Image:</strong> {{.Media.Image.OriginalURL}}</p>
</div>
{{end}}
{{if .Caption}}
<div><em>{{.Caption}}</em></div>
{{end}}
{{end}}
</div>
{{end}}
{{define "interactiveItem"}}
<div class="item interactive-item">
<h4>Interactive Content</h4>
{{range .Items}}
{{if .Title}}
<p><strong>{{.Title}}</strong></p>
{{end}}
{{if .Paragraph}}
<div>{{safeHTML .Paragraph}}</div>
{{end}}
{{end}}
</div>
{{end}}
{{define "dividerItem"}}
<hr>
{{end}}
{{define "unknownItem"}}
<div class="item unknown-item">
<h4>{{.TypeTitle}} Content</h4>
{{range .Items}}
{{if .Title}}
<p><strong>{{.Title}}</strong></p>
{{end}}
{{if .Paragraph}}
<div>{{safeHTML .Paragraph}}</div>
{{end}}
{{end}}
</div>
{{end}}

View File

@ -1,131 +0,0 @@
package exporters
import (
"strings"
"golang.org/x/text/cases"
"golang.org/x/text/language"
"github.com/kjanat/articulate-parser/internal/models"
"github.com/kjanat/articulate-parser/internal/services"
)
// Item type constants.
const (
itemTypeText = "text"
itemTypeList = "list"
itemTypeKnowledgeCheck = "knowledgecheck"
itemTypeMultimedia = "multimedia"
itemTypeImage = "image"
itemTypeInteractive = "interactive"
itemTypeDivider = "divider"
)
// templateData represents the data structure passed to the HTML template.
type templateData struct {
Course models.CourseInfo
ShareID string
Sections []templateSection
CSS string
}
// templateSection represents a course section or lesson.
type templateSection struct {
Type string
Title string
Number int
Description string
Items []templateItem
}
// templateItem represents a course item with preprocessed data.
type templateItem struct {
Type string
TypeTitle string
Items []templateSubItem
}
// templateSubItem represents a sub-item with preprocessed data.
type templateSubItem struct {
Heading string
Paragraph string
Title string
Caption string
CleanText string
Answers []models.Answer
Feedback string
Media *models.Media
}
// prepareTemplateData converts a Course model into template-friendly data.
func prepareTemplateData(course *models.Course, htmlCleaner *services.HTMLCleaner) *templateData {
data := &templateData{
Course: course.Course,
ShareID: course.ShareID,
Sections: make([]templateSection, 0, len(course.Course.Lessons)),
CSS: defaultCSS,
}
lessonCounter := 0
for _, lesson := range course.Course.Lessons {
section := templateSection{
Type: lesson.Type,
Title: lesson.Title,
Description: lesson.Description,
}
if lesson.Type != "section" {
lessonCounter++
section.Number = lessonCounter
section.Items = prepareItems(lesson.Items, htmlCleaner)
}
data.Sections = append(data.Sections, section)
}
return data
}
// prepareItems converts model Items to template Items.
func prepareItems(items []models.Item, htmlCleaner *services.HTMLCleaner) []templateItem {
result := make([]templateItem, 0, len(items))
for _, item := range items {
tItem := templateItem{
Type: strings.ToLower(item.Type),
Items: make([]templateSubItem, 0, len(item.Items)),
}
// Set type title for unknown items
if tItem.Type != itemTypeText && tItem.Type != itemTypeList && tItem.Type != itemTypeKnowledgeCheck &&
tItem.Type != itemTypeMultimedia && tItem.Type != itemTypeImage && tItem.Type != itemTypeInteractive &&
tItem.Type != itemTypeDivider {
caser := cases.Title(language.English)
tItem.TypeTitle = caser.String(item.Type)
}
// Process sub-items
for _, subItem := range item.Items {
tSubItem := templateSubItem{
Heading: subItem.Heading,
Paragraph: subItem.Paragraph,
Title: subItem.Title,
Caption: subItem.Caption,
Answers: subItem.Answers,
Feedback: subItem.Feedback,
Media: subItem.Media,
}
// Clean HTML for list items
if tItem.Type == itemTypeList && subItem.Paragraph != "" {
tSubItem.CleanText = htmlCleaner.CleanHTML(subItem.Paragraph)
}
tItem.Items = append(tItem.Items, tSubItem)
}
result = append(result, tItem)
}
return result
}
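For reference, the removed template path reduces to roughly this flow; renderWithTemplate is a hypothetical name (html/template and io imports assumed), everything else is defined in this file and in the html exporter above:

// Hypothetical helper inside package exporters, sketching the removed flow.
func renderWithTemplate(tmpl *template.Template, course *models.Course, cleaner *services.HTMLCleaner, w io.Writer) error {
	data := prepareTemplateData(course, cleaner) // flattens lessons into numbered Sections and pre-cleans list text
	return tmpl.Execute(w, data)                 // renders the embedded html_template.html with the safeHTML/safeCSS funcs
}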

View File

@ -1,6 +1,8 @@
+ // Package exporters_test provides tests for the html exporter.
  package exporters
  import (
+ "bytes"
  "os"
  "path/filepath"
  "strings"

@ -28,19 +30,15 @@ func TestNewHTMLExporter(t *testing.T) {
  if htmlExporter.htmlCleaner == nil {
  t.Error("htmlCleaner should not be nil")
  }
- if htmlExporter.tmpl == nil {
- t.Error("template should not be nil")
- }
  }
- // TestHTMLExporter_SupportedFormat tests the SupportedFormat method.
- func TestHTMLExporter_SupportedFormat(t *testing.T) {
+ // TestHTMLExporter_GetSupportedFormat tests the GetSupportedFormat method.
+ func TestHTMLExporter_GetSupportedFormat(t *testing.T) {
  htmlCleaner := services.NewHTMLCleaner()
  exporter := NewHTMLExporter(htmlCleaner)
  expected := "html"
- result := exporter.SupportedFormat()
+ result := exporter.GetSupportedFormat()
  if result != expected {
  t.Errorf("Expected format '%s', got '%s'", expected, result)

@ -121,7 +119,6 @@ func TestHTMLExporter_Export(t *testing.T) {
  }
  if !strings.Contains(contentStr, "font-family") {
- t.Logf("Generated HTML (first 500 chars):\n%s", contentStr[:min(500, len(contentStr))])
  t.Error("Output should contain CSS font-family")
  }
  }

@ -142,7 +139,409 @@ func TestHTMLExporter_Export_InvalidPath(t *testing.T) {
  }
  }
- // TestHTMLExporter_ComplexCourse tests export of a course with complex content.
+ // TestHTMLExporter_ProcessTextItem tests the processTextItem method.
func TestHTMLExporter_ProcessTextItem(t *testing.T) {
htmlCleaner := services.NewHTMLCleaner()
exporter := &HTMLExporter{htmlCleaner: htmlCleaner}
var buf bytes.Buffer
item := models.Item{
Type: "text",
Items: []models.SubItem{
{
Heading: "<h1>Test Heading</h1>",
Paragraph: "<p>Test paragraph with <strong>bold</strong> text.</p>",
},
{
Paragraph: "<p>Another paragraph.</p>",
},
},
}
exporter.processTextItem(&buf, item)
result := buf.String()
if !strings.Contains(result, "text-item") {
t.Error("Should contain text-item CSS class")
}
if !strings.Contains(result, "Text Content") {
t.Error("Should contain text content heading")
}
if !strings.Contains(result, "<h1>Test Heading</h1>") {
t.Error("Should preserve HTML heading")
}
if !strings.Contains(result, "<strong>bold</strong>") {
t.Error("Should preserve HTML formatting in paragraph")
}
}
// TestHTMLExporter_ProcessListItem tests the processListItem method.
func TestHTMLExporter_ProcessListItem(t *testing.T) {
htmlCleaner := services.NewHTMLCleaner()
exporter := &HTMLExporter{htmlCleaner: htmlCleaner}
var buf bytes.Buffer
item := models.Item{
Type: "list",
Items: []models.SubItem{
{Paragraph: "<p>First item</p>"},
{Paragraph: "<p>Second item with <em>emphasis</em></p>"},
{Paragraph: "<p>Third item</p>"},
},
}
exporter.processListItem(&buf, item)
result := buf.String()
if !strings.Contains(result, "list-item") {
t.Error("Should contain list-item CSS class")
}
if !strings.Contains(result, "<ul>") {
t.Error("Should contain unordered list")
}
if !strings.Contains(result, "<li>First item</li>") {
t.Error("Should contain first list item")
}
if !strings.Contains(result, "<li>Second item with emphasis</li>") {
t.Error("Should contain second list item with cleaned HTML")
}
if !strings.Contains(result, "<li>Third item</li>") {
t.Error("Should contain third list item")
}
}
// TestHTMLExporter_ProcessKnowledgeCheckItem tests the processKnowledgeCheckItem method.
func TestHTMLExporter_ProcessKnowledgeCheckItem(t *testing.T) {
htmlCleaner := services.NewHTMLCleaner()
exporter := &HTMLExporter{htmlCleaner: htmlCleaner}
var buf bytes.Buffer
item := models.Item{
Type: "knowledgeCheck",
Items: []models.SubItem{
{
Title: "<p>What is the correct answer?</p>",
Answers: []models.Answer{
{Title: "Wrong answer", Correct: false},
{Title: "Correct answer", Correct: true},
{Title: "Another wrong answer", Correct: false},
},
Feedback: "<p>Great job! This is the feedback.</p>",
},
},
}
exporter.processKnowledgeCheckItem(&buf, item)
result := buf.String()
if !strings.Contains(result, "knowledge-check") {
t.Error("Should contain knowledge-check CSS class")
}
if !strings.Contains(result, "Knowledge Check") {
t.Error("Should contain knowledge check heading")
}
if !strings.Contains(result, "What is the correct answer?") {
t.Error("Should contain question text")
}
if !strings.Contains(result, "Wrong answer") {
t.Error("Should contain first answer")
}
if !strings.Contains(result, "correct-answer") {
t.Error("Should mark correct answer with CSS class")
}
if !strings.Contains(result, "Feedback") {
t.Error("Should contain feedback section")
}
}
// TestHTMLExporter_ProcessMultimediaItem tests the processMultimediaItem method.
func TestHTMLExporter_ProcessMultimediaItem(t *testing.T) {
htmlCleaner := services.NewHTMLCleaner()
exporter := &HTMLExporter{htmlCleaner: htmlCleaner}
var buf bytes.Buffer
item := models.Item{
Type: "multimedia",
Items: []models.SubItem{
{
Title: "<p>Video Title</p>",
Media: &models.Media{
Video: &models.VideoMedia{
OriginalUrl: "https://example.com/video.mp4",
Duration: 120,
},
},
Caption: "<p>Video caption</p>",
},
},
}
exporter.processMultimediaItem(&buf, item)
result := buf.String()
if !strings.Contains(result, "multimedia-item") {
t.Error("Should contain multimedia-item CSS class")
}
if !strings.Contains(result, "Media Content") {
t.Error("Should contain media content heading")
}
if !strings.Contains(result, "Video Title") {
t.Error("Should contain video title")
}
if !strings.Contains(result, "https://example.com/video.mp4") {
t.Error("Should contain video URL")
}
if !strings.Contains(result, "120 seconds") {
t.Error("Should contain video duration")
}
if !strings.Contains(result, "Video caption") {
t.Error("Should contain video caption")
}
}
// TestHTMLExporter_ProcessImageItem tests the processImageItem method.
func TestHTMLExporter_ProcessImageItem(t *testing.T) {
htmlCleaner := services.NewHTMLCleaner()
exporter := &HTMLExporter{htmlCleaner: htmlCleaner}
var buf bytes.Buffer
item := models.Item{
Type: "image",
Items: []models.SubItem{
{
Media: &models.Media{
Image: &models.ImageMedia{
OriginalUrl: "https://example.com/image.png",
},
},
Caption: "<p>Image caption</p>",
},
},
}
exporter.processImageItem(&buf, item)
result := buf.String()
if !strings.Contains(result, "multimedia-item") {
t.Error("Should contain multimedia-item CSS class")
}
if !strings.Contains(result, "Image") {
t.Error("Should contain image heading")
}
if !strings.Contains(result, "https://example.com/image.png") {
t.Error("Should contain image URL")
}
if !strings.Contains(result, "Image caption") {
t.Error("Should contain image caption")
}
}
// TestHTMLExporter_ProcessInteractiveItem tests the processInteractiveItem method.
func TestHTMLExporter_ProcessInteractiveItem(t *testing.T) {
htmlCleaner := services.NewHTMLCleaner()
exporter := &HTMLExporter{htmlCleaner: htmlCleaner}
var buf bytes.Buffer
item := models.Item{
Type: "interactive",
Items: []models.SubItem{
{
Title: "<p>Interactive element title</p>",
Paragraph: "<p>Interactive content description</p>",
},
},
}
exporter.processInteractiveItem(&buf, item)
result := buf.String()
if !strings.Contains(result, "interactive-item") {
t.Error("Should contain interactive-item CSS class")
}
if !strings.Contains(result, "Interactive Content") {
t.Error("Should contain interactive content heading")
}
if !strings.Contains(result, "Interactive element title") {
t.Error("Should contain interactive element title")
}
if !strings.Contains(result, "Interactive content description") {
t.Error("Should contain interactive content description")
}
}
// TestHTMLExporter_ProcessDividerItem tests the processDividerItem method.
func TestHTMLExporter_ProcessDividerItem(t *testing.T) {
htmlCleaner := services.NewHTMLCleaner()
exporter := &HTMLExporter{htmlCleaner: htmlCleaner}
var buf bytes.Buffer
exporter.processDividerItem(&buf)
result := buf.String()
expected := " <hr>\n\n"
if result != expected {
t.Errorf("Expected %q, got %q", expected, result)
}
}
// TestHTMLExporter_ProcessUnknownItem tests the processUnknownItem method.
func TestHTMLExporter_ProcessUnknownItem(t *testing.T) {
htmlCleaner := services.NewHTMLCleaner()
exporter := &HTMLExporter{htmlCleaner: htmlCleaner}
var buf bytes.Buffer
item := models.Item{
Type: "unknown",
Items: []models.SubItem{
{
Title: "<p>Unknown item title</p>",
Paragraph: "<p>Unknown item content</p>",
},
},
}
exporter.processUnknownItem(&buf, item)
result := buf.String()
if !strings.Contains(result, "unknown-item") {
t.Error("Should contain unknown-item CSS class")
}
if !strings.Contains(result, "Unknown Content") {
t.Error("Should contain unknown content heading")
}
if !strings.Contains(result, "Unknown item title") {
t.Error("Should contain unknown item title")
}
if !strings.Contains(result, "Unknown item content") {
t.Error("Should contain unknown item content")
}
}
// TestHTMLExporter_ProcessAnswers tests the processAnswers method.
func TestHTMLExporter_ProcessAnswers(t *testing.T) {
htmlCleaner := services.NewHTMLCleaner()
exporter := &HTMLExporter{htmlCleaner: htmlCleaner}
var buf bytes.Buffer
answers := []models.Answer{
{Title: "Answer 1", Correct: false},
{Title: "Answer 2", Correct: true},
{Title: "Answer 3", Correct: false},
}
exporter.processAnswers(&buf, answers)
result := buf.String()
if !strings.Contains(result, "answers") {
t.Error("Should contain answers CSS class")
}
if !strings.Contains(result, "<h5>Answers:</h5>") {
t.Error("Should contain answers heading")
}
if !strings.Contains(result, "<ol>") {
t.Error("Should contain ordered list")
}
if !strings.Contains(result, "<li>Answer 1</li>") {
t.Error("Should contain first answer")
}
if !strings.Contains(result, "correct-answer") {
t.Error("Should mark correct answer with CSS class")
}
if !strings.Contains(result, "<li class=\"correct-answer\">Answer 2</li>") {
t.Error("Should mark correct answer properly")
}
if !strings.Contains(result, "<li>Answer 3</li>") {
t.Error("Should contain third answer")
}
}
// TestHTMLExporter_ProcessItemToHTML_AllTypes tests all item types.
func TestHTMLExporter_ProcessItemToHTML_AllTypes(t *testing.T) {
htmlCleaner := services.NewHTMLCleaner()
exporter := &HTMLExporter{htmlCleaner: htmlCleaner}
tests := []struct {
name string
itemType string
expectedText string
}{
{
name: "text item",
itemType: "text",
expectedText: "Text Content",
},
{
name: "list item",
itemType: "list",
expectedText: "List",
},
{
name: "knowledge check item",
itemType: "knowledgeCheck",
expectedText: "Knowledge Check",
},
{
name: "multimedia item",
itemType: "multimedia",
expectedText: "Media Content",
},
{
name: "image item",
itemType: "image",
expectedText: "Image",
},
{
name: "interactive item",
itemType: "interactive",
expectedText: "Interactive Content",
},
{
name: "divider item",
itemType: "divider",
expectedText: "<hr>",
},
{
name: "unknown item",
itemType: "unknown",
expectedText: "Unknown Content",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var buf bytes.Buffer
item := models.Item{
Type: tt.itemType,
Items: []models.SubItem{
{Title: "Test title", Paragraph: "Test content"},
},
}
// Handle empty unknown items
if tt.itemType == "unknown" && tt.expectedText == "" {
item.Items = []models.SubItem{}
}
exporter.processItemToHTML(&buf, item)
result := buf.String()
if tt.expectedText != "" && !strings.Contains(result, tt.expectedText) {
t.Errorf("Expected content to contain: %q\nGot: %q", tt.expectedText, result)
}
})
}
}
// TestHTMLExporter_ComplexCourse tests export of a complex course structure.
func TestHTMLExporter_ComplexCourse(t *testing.T) { func TestHTMLExporter_ComplexCourse(t *testing.T) {
htmlCleaner := services.NewHTMLCleaner() htmlCleaner := services.NewHTMLCleaner()
exporter := NewHTMLExporter(htmlCleaner) exporter := NewHTMLExporter(htmlCleaner)
@ -344,17 +743,11 @@ func TestHTMLExporter_HTMLCleaning(t *testing.T) {
Type: "text", Type: "text",
Items: []models.SubItem{ Items: []models.SubItem{
{ {
Heading: "<h2>HTML Heading</h2>", Heading: "<h1>Heading with <em>emphasis</em> and &amp; entities</h1>",
Paragraph: "<p>Content with <em>emphasis</em> and <strong>strong</strong> text.</p>", Paragraph: "<p>Paragraph with &lt;code&gt; entities and <strong>formatting</strong>.</p>",
}, },
}, },
}, },
{
Type: "list",
Items: []models.SubItem{
{Paragraph: "<p>List item with <b>bold</b> text</p>"},
},
},
}, },
}, },
}, },
@ -369,6 +762,13 @@ func TestHTMLExporter_HTMLCleaning(t *testing.T) {
t.Fatalf("Export failed: %v", err) t.Fatalf("Export failed: %v", err)
} }
// Verify file was created (basic check that HTML handling didn't break export)
if _, err := os.Stat(outputPath); os.IsNotExist(err) {
t.Fatal("Output file was not created")
}
// Read content and verify some HTML is preserved (descriptions, headings, paragraphs)
// while list items are cleaned for safety
content, err := os.ReadFile(outputPath) content, err := os.ReadFile(outputPath)
if err != nil { if err != nil {
t.Fatalf("Failed to read output file: %v", err) t.Fatalf("Failed to read output file: %v", err)
@ -376,23 +776,19 @@ func TestHTMLExporter_HTMLCleaning(t *testing.T) {
contentStr := string(content) contentStr := string(content)
// HTML content in descriptions should be preserved // HTML should be preserved in some places
if !strings.Contains(contentStr, "<b>bold</b>") { if !strings.Contains(contentStr, "<b>bold</b>") {
t.Error("Should preserve HTML formatting in descriptions") t.Error("Should preserve HTML formatting in descriptions")
} }
if !strings.Contains(contentStr, "<h1>Heading with <em>emphasis</em>") {
// HTML content in headings should be preserved
if !strings.Contains(contentStr, "<h2>HTML Heading</h2>") {
t.Error("Should preserve HTML in headings") t.Error("Should preserve HTML in headings")
} }
if !strings.Contains(contentStr, "<strong>formatting</strong>") {
// List items should have HTML tags stripped (cleaned) t.Error("Should preserve HTML in paragraphs")
if !strings.Contains(contentStr, "List item with bold text") {
t.Error("Should clean HTML from list items")
} }
} }
// createTestCourseForHTML creates a test course for HTML export tests. // createTestCourseForHTML creates a test course for HTML export testing.
func createTestCourseForHTML() *models.Course { func createTestCourseForHTML() *models.Course {
return &models.Course{ return &models.Course{
ShareID: "test-share-id", ShareID: "test-share-id",
@ -442,14 +838,38 @@ func BenchmarkHTMLExporter_Export(b *testing.B) {
exporter := NewHTMLExporter(htmlCleaner) exporter := NewHTMLExporter(htmlCleaner)
course := createTestCourseForHTML() course := createTestCourseForHTML()
// Create temporary directory
tempDir := b.TempDir() tempDir := b.TempDir()
for i := range b.N { b.ResetTimer()
outputPath := filepath.Join(tempDir, "bench-course-"+string(rune(i))+".html") for i := 0; i < b.N; i++ {
if err := exporter.Export(course, outputPath); err != nil { outputPath := filepath.Join(tempDir, "benchmark-course.html")
b.Fatalf("Export failed: %v", err) _ = exporter.Export(course, outputPath)
// Clean up for next iteration
os.Remove(outputPath)
} }
} }
// BenchmarkHTMLExporter_ProcessTextItem benchmarks text item processing.
func BenchmarkHTMLExporter_ProcessTextItem(b *testing.B) {
htmlCleaner := services.NewHTMLCleaner()
exporter := &HTMLExporter{htmlCleaner: htmlCleaner}
item := models.Item{
Type: "text",
Items: []models.SubItem{
{
Heading: "<h1>Benchmark Heading</h1>",
Paragraph: "<p>Benchmark paragraph with <strong>formatting</strong>.</p>",
},
},
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
var buf bytes.Buffer
exporter.processTextItem(&buf, item)
}
} }
// BenchmarkHTMLExporter_ComplexCourse benchmarks export of a complex course. // BenchmarkHTMLExporter_ComplexCourse benchmarks export of a complex course.
@ -469,40 +889,39 @@ func BenchmarkHTMLExporter_ComplexCourse(b *testing.B) {
} }
// Fill with test data // Fill with test data
for i := range 10 { for i := 0; i < 10; i++ {
lesson := models.Lesson{ lesson := models.Lesson{
ID: "lesson-" + string(rune(i)), ID: "lesson-" + string(rune(i)),
Title: "Benchmark Lesson " + string(rune(i)), Title: "Lesson " + string(rune(i)),
Type: "lesson", Type: "lesson",
Description: "<p>Lesson description</p>", Items: make([]models.Item, 5), // 5 items per lesson
Items: []models.Item{
{
Type: "text",
Items: []models.SubItem{
{
Heading: "<h2>Heading</h2>",
Paragraph: "<p>Paragraph with content.</p>",
},
},
},
{
Type: "list",
Items: []models.SubItem{
{Paragraph: "<p>Item 1</p>"},
{Paragraph: "<p>Item 2</p>"},
},
},
},
} }
for j := 0; j < 5; j++ {
item := models.Item{
Type: "text",
Items: make([]models.SubItem, 3), // 3 sub-items per item
}
for k := 0; k < 3; k++ {
item.Items[k] = models.SubItem{
Heading: "<h3>Heading " + string(rune(k)) + "</h3>",
Paragraph: "<p>Paragraph content with <strong>formatting</strong> for performance testing.</p>",
}
}
lesson.Items[j] = item
}
course.Course.Lessons[i] = lesson course.Course.Lessons[i] = lesson
} }
tempDir := b.TempDir() tempDir := b.TempDir()
for i := range b.N { b.ResetTimer()
outputPath := filepath.Join(tempDir, "bench-complex-"+string(rune(i))+".html") for i := 0; i < b.N; i++ {
if err := exporter.Export(course, outputPath); err != nil { outputPath := filepath.Join(tempDir, "benchmark-complex.html")
b.Fatalf("Export failed: %v", err) _ = exporter.Export(course, outputPath)
} os.Remove(outputPath)
} }
} }

View File

@ -1,3 +1,5 @@
// Package exporters provides implementations of the Exporter interface
// for converting Articulate Rise courses into various file formats.
package exporters package exporters
import ( import (
@ -6,9 +8,6 @@ import (
"os" "os"
"strings" "strings"
"golang.org/x/text/cases"
"golang.org/x/text/language"
"github.com/kjanat/articulate-parser/internal/interfaces" "github.com/kjanat/articulate-parser/internal/interfaces"
"github.com/kjanat/articulate-parser/internal/models" "github.com/kjanat/articulate-parser/internal/models"
"github.com/kjanat/articulate-parser/internal/services" "github.com/kjanat/articulate-parser/internal/services"
@ -35,7 +34,16 @@ func NewMarkdownExporter(htmlCleaner *services.HTMLCleaner) interfaces.Exporter
} }
} }
// Export converts the course to Markdown format and writes it to the output path. // Export exports a course to Markdown format.
// It generates a structured Markdown document from the course data
// and writes it to the specified output path.
//
// Parameters:
// - course: The course data model to export
// - outputPath: The file path where the Markdown content will be written
//
// Returns:
// - An error if writing to the output file fails
func (e *MarkdownExporter) Export(course *models.Course, outputPath string) error { func (e *MarkdownExporter) Export(course *models.Course, outputPath string) error {
var buf bytes.Buffer var buf bytes.Buffer
@ -79,20 +87,26 @@ func (e *MarkdownExporter) Export(course *models.Course, outputPath string) erro
buf.WriteString("\n---\n\n") buf.WriteString("\n---\n\n")
} }
// #nosec G306 - 0644 is appropriate for export files that should be readable by others return os.WriteFile(outputPath, buf.Bytes(), 0644)
if err := os.WriteFile(outputPath, buf.Bytes(), 0o644); err != nil {
return fmt.Errorf("failed to write markdown file: %w", err)
}
return nil
} }
// SupportedFormat returns "markdown". // GetSupportedFormat returns the format name this exporter supports
func (e *MarkdownExporter) SupportedFormat() string { // It indicates the file format that the MarkdownExporter can generate.
return FormatMarkdown //
// Returns:
// - A string representing the supported format ("markdown")
func (e *MarkdownExporter) GetSupportedFormat() string {
return "markdown"
} }
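As a minimal usage sketch (illustrative only, not part of this change; it assumes a *models.Course value already loaded by the parser and the standard log package imported), wiring the cleaner into this exporter and calling Export looks like:

    cleaner := services.NewHTMLCleaner()
    exporter := NewMarkdownExporter(cleaner)
    if err := exporter.Export(course, "course.md"); err != nil {
        log.Fatalf("markdown export failed: %v", err)
    }
    // The supported-format method (SupportedFormat / GetSupportedFormat,
    // depending on which side of this rename is in effect) returns "markdown".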
// processItemToMarkdown converts a course item into Markdown format. // processItemToMarkdown converts a course item into Markdown format
// The level parameter determines the heading level (number of # characters). // and appends it to the provided buffer. It handles different item types
// with appropriate Markdown formatting.
//
// Parameters:
// - buf: The buffer to write the Markdown content to
// - item: The course item to process
// - level: The heading level for the item (determines the number of # characters)
func (e *MarkdownExporter) processItemToMarkdown(buf *bytes.Buffer, item models.Item, level int) { func (e *MarkdownExporter) processItemToMarkdown(buf *bytes.Buffer, item models.Item, level int) {
headingPrefix := strings.Repeat("#", level) headingPrefix := strings.Repeat("#", level)
@ -116,47 +130,47 @@ func (e *MarkdownExporter) processItemToMarkdown(buf *bytes.Buffer, item models.
} }
} }
// processTextItem handles text content with headings and paragraphs. // processTextItem handles text content with headings and paragraphs
func (e *MarkdownExporter) processTextItem(buf *bytes.Buffer, item models.Item, headingPrefix string) { func (e *MarkdownExporter) processTextItem(buf *bytes.Buffer, item models.Item, headingPrefix string) {
for _, subItem := range item.Items { for _, subItem := range item.Items {
if subItem.Heading != "" { if subItem.Heading != "" {
heading := e.htmlCleaner.CleanHTML(subItem.Heading) heading := e.htmlCleaner.CleanHTML(subItem.Heading)
if heading != "" { if heading != "" {
fmt.Fprintf(buf, "%s %s\n\n", headingPrefix, heading) buf.WriteString(fmt.Sprintf("%s %s\n\n", headingPrefix, heading))
} }
} }
if subItem.Paragraph != "" { if subItem.Paragraph != "" {
paragraph := e.htmlCleaner.CleanHTML(subItem.Paragraph) paragraph := e.htmlCleaner.CleanHTML(subItem.Paragraph)
if paragraph != "" { if paragraph != "" {
fmt.Fprintf(buf, "%s\n\n", paragraph) buf.WriteString(fmt.Sprintf("%s\n\n", paragraph))
} }
} }
} }
} }
// processListItem handles list items with bullet points. // processListItem handles list items with bullet points
func (e *MarkdownExporter) processListItem(buf *bytes.Buffer, item models.Item) { func (e *MarkdownExporter) processListItem(buf *bytes.Buffer, item models.Item) {
for _, subItem := range item.Items { for _, subItem := range item.Items {
if subItem.Paragraph != "" { if subItem.Paragraph != "" {
paragraph := e.htmlCleaner.CleanHTML(subItem.Paragraph) paragraph := e.htmlCleaner.CleanHTML(subItem.Paragraph)
if paragraph != "" { if paragraph != "" {
fmt.Fprintf(buf, "- %s\n", paragraph) buf.WriteString(fmt.Sprintf("- %s\n", paragraph))
} }
} }
} }
buf.WriteString("\n") buf.WriteString("\n")
} }
// processMultimediaItem handles multimedia content including videos and images. // processMultimediaItem handles multimedia content including videos and images
func (e *MarkdownExporter) processMultimediaItem(buf *bytes.Buffer, item models.Item, headingPrefix string) { func (e *MarkdownExporter) processMultimediaItem(buf *bytes.Buffer, item models.Item, headingPrefix string) {
fmt.Fprintf(buf, "%s Media Content\n\n", headingPrefix) buf.WriteString(fmt.Sprintf("%s Media Content\n\n", headingPrefix))
for _, subItem := range item.Items { for _, subItem := range item.Items {
e.processMediaSubItem(buf, subItem) e.processMediaSubItem(buf, subItem)
} }
buf.WriteString("\n") buf.WriteString("\n")
} }
// processMediaSubItem processes individual media items (video/image). // processMediaSubItem processes individual media items (video/image)
func (e *MarkdownExporter) processMediaSubItem(buf *bytes.Buffer, subItem models.SubItem) { func (e *MarkdownExporter) processMediaSubItem(buf *bytes.Buffer, subItem models.SubItem) {
if subItem.Media != nil { if subItem.Media != nil {
e.processVideoMedia(buf, subItem.Media) e.processVideoMedia(buf, subItem.Media)
@ -164,67 +178,67 @@ func (e *MarkdownExporter) processMediaSubItem(buf *bytes.Buffer, subItem models
} }
if subItem.Caption != "" { if subItem.Caption != "" {
caption := e.htmlCleaner.CleanHTML(subItem.Caption) caption := e.htmlCleaner.CleanHTML(subItem.Caption)
fmt.Fprintf(buf, "*%s*\n", caption) buf.WriteString(fmt.Sprintf("*%s*\n", caption))
} }
} }
// processVideoMedia processes video media content. // processVideoMedia processes video media content
func (e *MarkdownExporter) processVideoMedia(buf *bytes.Buffer, media *models.Media) { func (e *MarkdownExporter) processVideoMedia(buf *bytes.Buffer, media *models.Media) {
if media.Video != nil { if media.Video != nil {
fmt.Fprintf(buf, "**Video**: %s\n", media.Video.OriginalURL) buf.WriteString(fmt.Sprintf("**Video**: %s\n", media.Video.OriginalUrl))
if media.Video.Duration > 0 { if media.Video.Duration > 0 {
fmt.Fprintf(buf, "**Duration**: %d seconds\n", media.Video.Duration) buf.WriteString(fmt.Sprintf("**Duration**: %d seconds\n", media.Video.Duration))
} }
} }
} }
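For example, with the sample values used in the exporter tests in this changeset (a video at https://example.com/video.mp4 lasting 120 seconds), the lines this method emits would be:

    **Video**: https://example.com/video.mp4
    **Duration**: 120 seconds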
// processImageMedia processes image media content. // processImageMedia processes image media content
func (e *MarkdownExporter) processImageMedia(buf *bytes.Buffer, media *models.Media) { func (e *MarkdownExporter) processImageMedia(buf *bytes.Buffer, media *models.Media) {
if media.Image != nil { if media.Image != nil {
fmt.Fprintf(buf, "**Image**: %s\n", media.Image.OriginalURL) buf.WriteString(fmt.Sprintf("**Image**: %s\n", media.Image.OriginalUrl))
} }
} }
// processImageItem handles standalone image items. // processImageItem handles standalone image items
func (e *MarkdownExporter) processImageItem(buf *bytes.Buffer, item models.Item, headingPrefix string) { func (e *MarkdownExporter) processImageItem(buf *bytes.Buffer, item models.Item, headingPrefix string) {
fmt.Fprintf(buf, "%s Image\n\n", headingPrefix) buf.WriteString(fmt.Sprintf("%s Image\n\n", headingPrefix))
for _, subItem := range item.Items { for _, subItem := range item.Items {
if subItem.Media != nil && subItem.Media.Image != nil { if subItem.Media != nil && subItem.Media.Image != nil {
fmt.Fprintf(buf, "**Image**: %s\n", subItem.Media.Image.OriginalURL) buf.WriteString(fmt.Sprintf("**Image**: %s\n", subItem.Media.Image.OriginalUrl))
} }
if subItem.Caption != "" { if subItem.Caption != "" {
caption := e.htmlCleaner.CleanHTML(subItem.Caption) caption := e.htmlCleaner.CleanHTML(subItem.Caption)
fmt.Fprintf(buf, "*%s*\n", caption) buf.WriteString(fmt.Sprintf("*%s*\n", caption))
} }
} }
buf.WriteString("\n") buf.WriteString("\n")
} }
// processKnowledgeCheckItem handles quiz questions and knowledge checks. // processKnowledgeCheckItem handles quiz questions and knowledge checks
func (e *MarkdownExporter) processKnowledgeCheckItem(buf *bytes.Buffer, item models.Item, headingPrefix string) { func (e *MarkdownExporter) processKnowledgeCheckItem(buf *bytes.Buffer, item models.Item, headingPrefix string) {
fmt.Fprintf(buf, "%s Knowledge Check\n\n", headingPrefix) buf.WriteString(fmt.Sprintf("%s Knowledge Check\n\n", headingPrefix))
for _, subItem := range item.Items { for _, subItem := range item.Items {
e.processQuestionSubItem(buf, subItem) e.processQuestionSubItem(buf, subItem)
} }
buf.WriteString("\n") buf.WriteString("\n")
} }
// processQuestionSubItem processes individual question items. // processQuestionSubItem processes individual question items
func (e *MarkdownExporter) processQuestionSubItem(buf *bytes.Buffer, subItem models.SubItem) { func (e *MarkdownExporter) processQuestionSubItem(buf *bytes.Buffer, subItem models.SubItem) {
if subItem.Title != "" { if subItem.Title != "" {
title := e.htmlCleaner.CleanHTML(subItem.Title) title := e.htmlCleaner.CleanHTML(subItem.Title)
fmt.Fprintf(buf, "**Question**: %s\n\n", title) buf.WriteString(fmt.Sprintf("**Question**: %s\n\n", title))
} }
e.processAnswers(buf, subItem.Answers) e.processAnswers(buf, subItem.Answers)
if subItem.Feedback != "" { if subItem.Feedback != "" {
feedback := e.htmlCleaner.CleanHTML(subItem.Feedback) feedback := e.htmlCleaner.CleanHTML(subItem.Feedback)
fmt.Fprintf(buf, "\n**Feedback**: %s\n", feedback) buf.WriteString(fmt.Sprintf("\n**Feedback**: %s\n", feedback))
} }
} }
// processAnswers processes answer choices for quiz questions. // processAnswers processes answer choices for quiz questions
func (e *MarkdownExporter) processAnswers(buf *bytes.Buffer, answers []models.Answer) { func (e *MarkdownExporter) processAnswers(buf *bytes.Buffer, answers []models.Answer) {
buf.WriteString("**Answers**:\n") buf.WriteString("**Answers**:\n")
for i, answer := range answers { for i, answer := range answers {
@ -232,45 +246,44 @@ func (e *MarkdownExporter) processAnswers(buf *bytes.Buffer, answers []models.An
if answer.Correct { if answer.Correct {
correctMark = " ✓" correctMark = " ✓"
} }
fmt.Fprintf(buf, "%d. %s%s\n", i+1, answer.Title, correctMark) buf.WriteString(fmt.Sprintf("%d. %s%s\n", i+1, answer.Title, correctMark))
} }
} }
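So, taking the three answers used in the knowledge-check tests in this changeset (only the second one correct), the emitted Markdown would read:

    **Answers**:
    1. Wrong answer
    2. Correct answer ✓
    3. Another wrong answer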
// processInteractiveItem handles interactive content. // processInteractiveItem handles interactive content
func (e *MarkdownExporter) processInteractiveItem(buf *bytes.Buffer, item models.Item, headingPrefix string) { func (e *MarkdownExporter) processInteractiveItem(buf *bytes.Buffer, item models.Item, headingPrefix string) {
fmt.Fprintf(buf, "%s Interactive Content\n\n", headingPrefix) buf.WriteString(fmt.Sprintf("%s Interactive Content\n\n", headingPrefix))
for _, subItem := range item.Items { for _, subItem := range item.Items {
if subItem.Title != "" { if subItem.Title != "" {
title := e.htmlCleaner.CleanHTML(subItem.Title) title := e.htmlCleaner.CleanHTML(subItem.Title)
fmt.Fprintf(buf, "**%s**\n\n", title) buf.WriteString(fmt.Sprintf("**%s**\n\n", title))
} }
} }
} }
// processDividerItem handles divider elements. // processDividerItem handles divider elements
func (e *MarkdownExporter) processDividerItem(buf *bytes.Buffer) { func (e *MarkdownExporter) processDividerItem(buf *bytes.Buffer) {
buf.WriteString("---\n\n") buf.WriteString("---\n\n")
} }
// processUnknownItem handles unknown or unsupported item types. // processUnknownItem handles unknown or unsupported item types
func (e *MarkdownExporter) processUnknownItem(buf *bytes.Buffer, item models.Item, headingPrefix string) { func (e *MarkdownExporter) processUnknownItem(buf *bytes.Buffer, item models.Item, headingPrefix string) {
if len(item.Items) > 0 { if len(item.Items) > 0 {
caser := cases.Title(language.English) buf.WriteString(fmt.Sprintf("%s %s Content\n\n", headingPrefix, strings.Title(item.Type)))
fmt.Fprintf(buf, "%s %s Content\n\n", headingPrefix, caser.String(item.Type))
for _, subItem := range item.Items { for _, subItem := range item.Items {
e.processGenericSubItem(buf, subItem) e.processGenericSubItem(buf, subItem)
} }
} }
} }
// processGenericSubItem processes sub-items for unknown types. // processGenericSubItem processes sub-items for unknown types
func (e *MarkdownExporter) processGenericSubItem(buf *bytes.Buffer, subItem models.SubItem) { func (e *MarkdownExporter) processGenericSubItem(buf *bytes.Buffer, subItem models.SubItem) {
if subItem.Title != "" { if subItem.Title != "" {
title := e.htmlCleaner.CleanHTML(subItem.Title) title := e.htmlCleaner.CleanHTML(subItem.Title)
fmt.Fprintf(buf, "**%s**\n\n", title) buf.WriteString(fmt.Sprintf("**%s**\n\n", title))
} }
if subItem.Paragraph != "" { if subItem.Paragraph != "" {
paragraph := e.htmlCleaner.CleanHTML(subItem.Paragraph) paragraph := e.htmlCleaner.CleanHTML(subItem.Paragraph)
fmt.Fprintf(buf, "%s\n\n", paragraph) buf.WriteString(fmt.Sprintf("%s\n\n", paragraph))
} }
} }

View File

@ -1,3 +1,4 @@
// Package exporters_test provides tests for the markdown exporter.
package exporters package exporters
import ( import (
@ -31,13 +32,13 @@ func TestNewMarkdownExporter(t *testing.T) {
} }
} }
// TestMarkdownExporter_SupportedFormat tests the SupportedFormat method. // TestMarkdownExporter_GetSupportedFormat tests the GetSupportedFormat method.
func TestMarkdownExporter_SupportedFormat(t *testing.T) { func TestMarkdownExporter_GetSupportedFormat(t *testing.T) {
htmlCleaner := services.NewHTMLCleaner() htmlCleaner := services.NewHTMLCleaner()
exporter := NewMarkdownExporter(htmlCleaner) exporter := NewMarkdownExporter(htmlCleaner)
expected := "markdown" expected := "markdown"
result := exporter.SupportedFormat() result := exporter.GetSupportedFormat()
if result != expected { if result != expected {
t.Errorf("Expected format '%s', got '%s'", expected, result) t.Errorf("Expected format '%s', got '%s'", expected, result)
@ -187,7 +188,7 @@ func TestMarkdownExporter_ProcessMultimediaItem(t *testing.T) {
{ {
Media: &models.Media{ Media: &models.Media{
Video: &models.VideoMedia{ Video: &models.VideoMedia{
OriginalURL: "https://example.com/video.mp4", OriginalUrl: "https://example.com/video.mp4",
Duration: 120, Duration: 120,
}, },
}, },
@ -226,7 +227,7 @@ func TestMarkdownExporter_ProcessImageItem(t *testing.T) {
{ {
Media: &models.Media{ Media: &models.Media{
Image: &models.ImageMedia{ Image: &models.ImageMedia{
OriginalURL: "https://example.com/image.jpg", OriginalUrl: "https://example.com/image.jpg",
}, },
}, },
Caption: "<p>Image caption</p>", Caption: "<p>Image caption</p>",
@ -371,7 +372,7 @@ func TestMarkdownExporter_ProcessVideoMedia(t *testing.T) {
var buf bytes.Buffer var buf bytes.Buffer
media := &models.Media{ media := &models.Media{
Video: &models.VideoMedia{ Video: &models.VideoMedia{
OriginalURL: "https://example.com/video.mp4", OriginalUrl: "https://example.com/video.mp4",
Duration: 300, Duration: 300,
}, },
} }
@ -396,7 +397,7 @@ func TestMarkdownExporter_ProcessImageMedia(t *testing.T) {
var buf bytes.Buffer var buf bytes.Buffer
media := &models.Media{ media := &models.Media{
Image: &models.ImageMedia{ Image: &models.ImageMedia{
OriginalURL: "https://example.com/image.jpg", OriginalUrl: "https://example.com/image.jpg",
}, },
} }
@ -660,13 +661,12 @@ func BenchmarkMarkdownExporter_Export(b *testing.B) {
// Create temporary directory // Create temporary directory
tempDir := b.TempDir() tempDir := b.TempDir()
for b.Loop() { b.ResetTimer()
for i := 0; i < b.N; i++ {
outputPath := filepath.Join(tempDir, "benchmark-course.md") outputPath := filepath.Join(tempDir, "benchmark-course.md")
_ = exporter.Export(course, outputPath) _ = exporter.Export(course, outputPath)
// Clean up for next iteration. Remove errors are ignored because we've already // Clean up for next iteration
// benchmarked the export operation; cleanup failures don't affect the benchmark os.Remove(outputPath)
// measurements or the validity of the next iteration's export.
_ = os.Remove(outputPath)
} }
} }
@ -685,7 +685,8 @@ func BenchmarkMarkdownExporter_ProcessTextItem(b *testing.B) {
}, },
} }
for b.Loop() { b.ResetTimer()
for i := 0; i < b.N; i++ {
var buf bytes.Buffer var buf bytes.Buffer
exporter.processTextItem(&buf, item, "###") exporter.processTextItem(&buf, item, "###")
} }

Binary file not shown.

View File

@ -1,12 +0,0 @@
# Example Course
Course description
## Course Information
- **Course ID**:
- **Share ID**: example-id
- **Navigation Mode**:
---

View File

@ -1,3 +1,5 @@
// Package interfaces provides the core contracts for the articulate-parser application.
// It defines interfaces for parsing and exporting Articulate Rise courses.
package interfaces package interfaces
import "github.com/kjanat/articulate-parser/internal/models" import "github.com/kjanat/articulate-parser/internal/models"
@ -10,9 +12,9 @@ type Exporter interface {
// specified output path. It returns an error if the export operation fails. // specified output path. It returns an error if the export operation fails.
Export(course *models.Course, outputPath string) error Export(course *models.Course, outputPath string) error
// SupportedFormat returns the name of the format this exporter supports. // GetSupportedFormat returns the name of the format this exporter supports.
// This is used to identify which exporter to use for a given format. // This is used to identify which exporter to use for a given format.
SupportedFormat() string GetSupportedFormat() string
} }
// ExporterFactory creates exporters for different formats. // ExporterFactory creates exporters for different formats.
@ -23,7 +25,7 @@ type ExporterFactory interface {
// It returns the appropriate exporter or an error if the format is not supported. // It returns the appropriate exporter or an error if the format is not supported.
CreateExporter(format string) (Exporter, error) CreateExporter(format string) (Exporter, error)
// SupportedFormats returns a list of all export formats supported by this factory. // GetSupportedFormats returns a list of all export formats supported by this factory.
// This is used to inform users of available export options. // This is used to inform users of available export options.
SupportedFormats() []string GetSupportedFormats() []string
} }
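To make the contract concrete, a hypothetical minimal exporter (not part of this change; encoding/json, os and the models package are assumed imports, and the format method is shown under the name used on one side of this rename) could look like:

    type jsonExporter struct{}

    // Export writes the course as pretty-printed JSON.
    func (e *jsonExporter) Export(course *models.Course, outputPath string) error {
        data, err := json.MarshalIndent(course, "", "  ")
        if err != nil {
            return err
        }
        return os.WriteFile(outputPath, data, 0o644)
    }

    // SupportedFormat identifies which format string selects this exporter.
    func (e *jsonExporter) SupportedFormat() string { return "json" }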

View File

@ -1,25 +0,0 @@
package interfaces
import "context"
// Logger defines the interface for structured logging.
// Implementations should provide leveled, structured logging capabilities.
type Logger interface {
// Debug logs a debug-level message with optional key-value pairs.
Debug(msg string, keysAndValues ...any)
// Info logs an info-level message with optional key-value pairs.
Info(msg string, keysAndValues ...any)
// Warn logs a warning-level message with optional key-value pairs.
Warn(msg string, keysAndValues ...any)
// Error logs an error-level message with optional key-value pairs.
Error(msg string, keysAndValues ...any)
// With returns a new logger with the given key-value pairs added as context.
With(keysAndValues ...any) Logger
// WithContext returns a new logger with context information.
WithContext(ctx context.Context) Logger
}

View File

@ -2,11 +2,7 @@
// It defines interfaces for parsing and exporting Articulate Rise courses. // It defines interfaces for parsing and exporting Articulate Rise courses.
package interfaces package interfaces
import ( import "github.com/kjanat/articulate-parser/internal/models"
"context"
"github.com/kjanat/articulate-parser/internal/models"
)
// CourseParser defines the interface for loading course data. // CourseParser defines the interface for loading course data.
// It provides methods to fetch course content either from a remote URI // It provides methods to fetch course content either from a remote URI
@ -14,9 +10,8 @@ import (
type CourseParser interface { type CourseParser interface {
// FetchCourse loads a course from a URI (typically an Articulate Rise share URL). // FetchCourse loads a course from a URI (typically an Articulate Rise share URL).
// It retrieves the course data from the remote location and returns a parsed Course model. // It retrieves the course data from the remote location and returns a parsed Course model.
// The context can be used for cancellation and timeout control.
// Returns an error if the fetch operation fails or if the data cannot be parsed. // Returns an error if the fetch operation fails or if the data cannot be parsed.
FetchCourse(ctx context.Context, uri string) (*models.Course, error) FetchCourse(uri string) (*models.Course, error)
// LoadCourseFromFile loads a course from a local file. // LoadCourseFromFile loads a course from a local file.
// It reads and parses the course data from the specified file path. // It reads and parses the course data from the specified file path.
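As a sketch of how the context-aware FetchCourse variant above is meant to be driven (hypothetical caller code; parser stands for any CourseParser implementation, and context, time and fmt are assumed imports):

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    course, err := parser.FetchCourse(ctx, "https://rise.articulate.com/share/YOUR_SHARE_ID")
    if err != nil {
        return fmt.Errorf("failed to fetch course: %w", err)
    }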

View File

@ -1,3 +1,5 @@
// Package models defines the data structures representing Articulate Rise courses.
// These structures closely match the JSON format used by Articulate Rise.
package models package models
// Lesson represents a single lesson or section within an Articulate Rise course. // Lesson represents a single lesson or section within an Articulate Rise course.
@ -16,7 +18,7 @@ type Lesson struct {
// Items is an ordered array of content items within the lesson // Items is an ordered array of content items within the lesson
Items []Item `json:"items"` Items []Item `json:"items"`
// Position stores the ordering information for the lesson // Position stores the ordering information for the lesson
Position any `json:"position"` Position interface{} `json:"position"`
// Ready indicates whether the lesson is marked as complete // Ready indicates whether the lesson is marked as complete
Ready bool `json:"ready"` Ready bool `json:"ready"`
// CreatedAt is the timestamp when the lesson was created // CreatedAt is the timestamp when the lesson was created
@ -39,9 +41,9 @@ type Item struct {
// Items contains the actual content elements (sub-items) of this item // Items contains the actual content elements (sub-items) of this item
Items []SubItem `json:"items"` Items []SubItem `json:"items"`
// Settings contains configuration options specific to this item type // Settings contains configuration options specific to this item type
Settings any `json:"settings"` Settings interface{} `json:"settings"`
// Data contains additional structured data for the item // Data contains additional structured data for the item
Data any `json:"data"` Data interface{} `json:"data"`
// Media contains any associated media for the item // Media contains any associated media for the item
Media *Media `json:"media,omitempty"` Media *Media `json:"media,omitempty"`
} }

View File

@ -1,3 +1,5 @@
// Package models defines the data structures representing Articulate Rise courses.
// These structures closely match the JSON format used by Articulate Rise.
package models package models
// Media represents a media element that can be either an image or a video. // Media represents a media element that can be either an image or a video.
@ -21,8 +23,8 @@ type ImageMedia struct {
Height int `json:"height,omitempty"` Height int `json:"height,omitempty"`
// CrushedKey is the identifier for a compressed version of the image // CrushedKey is the identifier for a compressed version of the image
CrushedKey string `json:"crushedKey,omitempty"` CrushedKey string `json:"crushedKey,omitempty"`
// OriginalURL is the URL to the full-resolution image // OriginalUrl is the URL to the full-resolution image
OriginalURL string `json:"originalUrl"` OriginalUrl string `json:"originalUrl"`
// UseCrushedKey indicates whether to use the compressed version // UseCrushedKey indicates whether to use the compressed version
UseCrushedKey bool `json:"useCrushedKey,omitempty"` UseCrushedKey bool `json:"useCrushedKey,omitempty"`
} }
@ -43,6 +45,6 @@ type VideoMedia struct {
InputKey string `json:"inputKey,omitempty"` InputKey string `json:"inputKey,omitempty"`
// Thumbnail is the URL to a smaller preview image // Thumbnail is the URL to a smaller preview image
Thumbnail string `json:"thumbnail,omitempty"` Thumbnail string `json:"thumbnail,omitempty"`
// OriginalURL is the URL to the source video file // OriginalUrl is the URL to the source video file
OriginalURL string `json:"originalUrl"` OriginalUrl string `json:"originalUrl"`
} }
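As a loose illustration of the JSON these fields map onto (a hypothetical round-trip using only the tags visible above; encoding/json is an assumed import, and the Go field is spelled OriginalURL or OriginalUrl depending on which side of this rename you are on, while the JSON key stays originalUrl):

    var v VideoMedia
    _ = json.Unmarshal([]byte(`{"thumbnail":"https://example.com/thumb.jpg","originalUrl":"https://example.com/video.mp4"}`), &v)
    // v.Thumbnail   == "https://example.com/thumb.jpg"
    // v.OriginalURL == "https://example.com/video.mp4"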

View File

@ -1,3 +1,4 @@
// Package models_test provides tests for the data models.
package models package models
import ( import (
@ -97,7 +98,7 @@ func TestCourseInfo_JSONMarshalUnmarshal(t *testing.T) {
Type: "jpg", Type: "jpg",
Width: 800, Width: 800,
Height: 600, Height: 600,
OriginalURL: "https://example.com/image.jpg", OriginalUrl: "https://example.com/image.jpg",
}, },
}, },
} }
@ -132,7 +133,7 @@ func TestLesson_JSONMarshalUnmarshal(t *testing.T) {
Ready: true, Ready: true,
CreatedAt: "2023-06-01T12:00:00Z", CreatedAt: "2023-06-01T12:00:00Z",
UpdatedAt: "2023-06-01T13:00:00Z", UpdatedAt: "2023-06-01T13:00:00Z",
Position: map[string]any{"x": 1, "y": 2}, Position: map[string]interface{}{"x": 1, "y": 2},
Items: []Item{ Items: []Item{
{ {
ID: "item-test", ID: "item-test",
@ -148,13 +149,13 @@ func TestLesson_JSONMarshalUnmarshal(t *testing.T) {
URL: "https://example.com/video.mp4", URL: "https://example.com/video.mp4",
Type: "mp4", Type: "mp4",
Duration: 120, Duration: 120,
OriginalURL: "https://example.com/video.mp4", OriginalUrl: "https://example.com/video.mp4",
}, },
}, },
}, },
}, },
Settings: map[string]any{"autoplay": false}, Settings: map[string]interface{}{"autoplay": false},
Data: map[string]any{"metadata": "test"}, Data: map[string]interface{}{"metadata": "test"},
}, },
}, },
} }
@ -196,11 +197,11 @@ func TestItem_JSONMarshalUnmarshal(t *testing.T) {
Feedback: "Well done!", Feedback: "Well done!",
}, },
}, },
Settings: map[string]any{ Settings: map[string]interface{}{
"allowRetry": true, "allowRetry": true,
"showAnswer": true, "showAnswer": true,
}, },
Data: map[string]any{ Data: map[string]interface{}{
"points": 10, "points": 10,
"weight": 1.5, "weight": 1.5,
}, },
@ -243,7 +244,7 @@ func TestSubItem_JSONMarshalUnmarshal(t *testing.T) {
Type: "png", Type: "png",
Width: 400, Width: 400,
Height: 300, Height: 300,
OriginalURL: "https://example.com/subitem.png", OriginalUrl: "https://example.com/subitem.png",
CrushedKey: "crushed-123", CrushedKey: "crushed-123",
UseCrushedKey: true, UseCrushedKey: true,
}, },
@ -304,7 +305,7 @@ func TestMedia_JSONMarshalUnmarshal(t *testing.T) {
Type: "jpeg", Type: "jpeg",
Width: 1200, Width: 1200,
Height: 800, Height: 800,
OriginalURL: "https://example.com/media.jpg", OriginalUrl: "https://example.com/media.jpg",
CrushedKey: "crushed-media", CrushedKey: "crushed-media",
UseCrushedKey: false, UseCrushedKey: false,
}, },
@ -335,7 +336,7 @@ func TestMedia_JSONMarshalUnmarshal(t *testing.T) {
Poster: "https://example.com/poster.jpg", Poster: "https://example.com/poster.jpg",
Thumbnail: "https://example.com/thumb.jpg", Thumbnail: "https://example.com/thumb.jpg",
InputKey: "input-123", InputKey: "input-123",
OriginalURL: "https://example.com/original.mp4", OriginalUrl: "https://example.com/original.mp4",
}, },
} }
@ -362,7 +363,7 @@ func TestImageMedia_JSONMarshalUnmarshal(t *testing.T) {
Type: "gif", Type: "gif",
Width: 640, Width: 640,
Height: 480, Height: 480,
OriginalURL: "https://example.com/image.gif", OriginalUrl: "https://example.com/image.gif",
CrushedKey: "crushed-gif", CrushedKey: "crushed-gif",
UseCrushedKey: true, UseCrushedKey: true,
} }
@ -396,7 +397,7 @@ func TestVideoMedia_JSONMarshalUnmarshal(t *testing.T) {
Poster: "https://example.com/poster.jpg", Poster: "https://example.com/poster.jpg",
Thumbnail: "https://example.com/thumbnail.jpg", Thumbnail: "https://example.com/thumbnail.jpg",
InputKey: "upload-456", InputKey: "upload-456",
OriginalURL: "https://example.com/original.webm", OriginalUrl: "https://example.com/original.webm",
} }
// Marshal to JSON // Marshal to JSON
@ -474,7 +475,7 @@ func TestLabelSet_JSONMarshalUnmarshal(t *testing.T) {
func TestEmptyStructures(t *testing.T) { func TestEmptyStructures(t *testing.T) {
testCases := []struct { testCases := []struct {
name string name string
data any data interface{}
}{ }{
{"Empty Course", Course{}}, {"Empty Course", Course{}},
{"Empty CourseInfo", CourseInfo{}}, {"Empty CourseInfo", CourseInfo{}},
@ -568,7 +569,7 @@ func TestNilPointerSafety(t *testing.T) {
// TestJSONTagsPresence tests that JSON tags are properly defined. // TestJSONTagsPresence tests that JSON tags are properly defined.
func TestJSONTagsPresence(t *testing.T) { func TestJSONTagsPresence(t *testing.T) {
// Test that important fields have JSON tags // Test that important fields have JSON tags
courseType := reflect.TypeFor[Course]() courseType := reflect.TypeOf(Course{})
if courseType.Kind() == reflect.Struct { if courseType.Kind() == reflect.Struct {
field, found := courseType.FieldByName("ShareID") field, found := courseType.FieldByName("ShareID")
if !found { if !found {
@ -585,7 +586,7 @@ func TestJSONTagsPresence(t *testing.T) {
} }
// Test CourseInfo // Test CourseInfo
courseInfoType := reflect.TypeFor[CourseInfo]() courseInfoType := reflect.TypeOf(CourseInfo{})
if courseInfoType.Kind() == reflect.Struct { if courseInfoType.Kind() == reflect.Struct {
field, found := courseInfoType.FieldByName("NavigationMode") field, found := courseInfoType.FieldByName("NavigationMode")
if !found { if !found {
@ -625,7 +626,8 @@ func BenchmarkCourse_JSONMarshal(b *testing.B) {
}, },
} }
for b.Loop() { b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _ = json.Marshal(course) _, _ = json.Marshal(course)
} }
} }
@ -658,16 +660,17 @@ func BenchmarkCourse_JSONUnmarshal(b *testing.B) {
jsonData, _ := json.Marshal(course) jsonData, _ := json.Marshal(course)
for b.Loop() { b.ResetTimer()
for i := 0; i < b.N; i++ {
var result Course var result Course
_ = json.Unmarshal(jsonData, &result) _ = json.Unmarshal(jsonData, &result)
} }
} }
// compareMaps compares two any values that should be maps. // compareMaps compares two interface{} values that should be maps
func compareMaps(original, unmarshaled any) bool { func compareMaps(original, unmarshaled interface{}) bool {
origMap, origOk := original.(map[string]any) origMap, origOk := original.(map[string]interface{})
unMap, unOk := unmarshaled.(map[string]any) unMap, unOk := unmarshaled.(map[string]interface{})
if !origOk || !unOk { if !origOk || !unOk {
// If not maps, use deep equal // If not maps, use deep equal
@ -711,7 +714,7 @@ func compareMaps(original, unmarshaled any) bool {
return true return true
} }
// compareLessons compares two Lesson structs accounting for JSON type conversion. // compareLessons compares two Lesson structs accounting for JSON type conversion
func compareLessons(original, unmarshaled Lesson) bool { func compareLessons(original, unmarshaled Lesson) bool {
// Compare all fields except Position and Items // Compare all fields except Position and Items
if original.ID != unmarshaled.ID || if original.ID != unmarshaled.ID ||
@ -734,7 +737,7 @@ func compareLessons(original, unmarshaled Lesson) bool {
return compareItems(original.Items, unmarshaled.Items) return compareItems(original.Items, unmarshaled.Items)
} }
// compareItems compares two Item slices accounting for JSON type conversion. // compareItems compares two Item slices accounting for JSON type conversion
func compareItems(original, unmarshaled []Item) bool { func compareItems(original, unmarshaled []Item) bool {
if len(original) != len(unmarshaled) { if len(original) != len(unmarshaled) {
return false return false
@ -748,7 +751,7 @@ func compareItems(original, unmarshaled []Item) bool {
return true return true
} }
// compareItem compares two Item structs accounting for JSON type conversion. // compareItem compares two Item structs accounting for JSON type conversion
func compareItem(original, unmarshaled Item) bool { func compareItem(original, unmarshaled Item) bool {
// Compare basic fields // Compare basic fields
if original.ID != unmarshaled.ID || if original.ID != unmarshaled.ID ||

View File

@ -3,7 +3,6 @@
package services package services
import ( import (
"context"
"fmt" "fmt"
"github.com/kjanat/articulate-parser/internal/interfaces" "github.com/kjanat/articulate-parser/internal/interfaces"
@ -45,8 +44,8 @@ func (a *App) ProcessCourseFromFile(filePath, format, outputPath string) error {
// ProcessCourseFromURI fetches a course from the provided URI and exports it to the specified format. // ProcessCourseFromURI fetches a course from the provided URI and exports it to the specified format.
// It takes the URI to fetch the course from, the desired export format, and the output file path. // It takes the URI to fetch the course from, the desired export format, and the output file path.
// Returns an error if fetching or exporting fails. // Returns an error if fetching or exporting fails.
func (a *App) ProcessCourseFromURI(ctx context.Context, uri, format, outputPath string) error { func (a *App) ProcessCourseFromURI(uri, format, outputPath string) error {
course, err := a.parser.FetchCourse(ctx, uri) course, err := a.parser.FetchCourse(uri)
if err != nil { if err != nil {
return fmt.Errorf("failed to fetch course: %w", err) return fmt.Errorf("failed to fetch course: %w", err)
} }
@ -70,8 +69,8 @@ func (a *App) exportCourse(course *models.Course, format, outputPath string) err
return nil return nil
} }
// SupportedFormats returns a list of all export formats supported by the application. // GetSupportedFormats returns a list of all export formats supported by the application.
// This information is provided by the ExporterFactory. // This information is provided by the ExporterFactory.
func (a *App) SupportedFormats() []string { func (a *App) GetSupportedFormats() []string {
return a.exporterFactory.SupportedFormats() return a.exporterFactory.GetSupportedFormats()
} }
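Pulling the pieces together, a rough caller-side sketch (hypothetical wiring; NewApp appears in the tests below, parser and exporterFactory stand for concrete CourseParser and ExporterFactory values, and the log package is an assumed import):

    app := NewApp(parser, exporterFactory)

    if err := app.ProcessCourseFromFile("course.json", "markdown", "course.md"); err != nil {
        log.Fatalf("processing failed: %v", err)
    }

    // SupportedFormats (GetSupportedFormats on the other side of this rename)
    // reports what the factory can produce, e.g. [markdown docx pdf].
    formats := app.SupportedFormats()
    _ = formats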

View File

@ -1,7 +1,7 @@
// Package services_test provides tests for the services package.
package services package services
import ( import (
"context"
"errors" "errors"
"testing" "testing"
@ -11,13 +11,13 @@ import (
// MockCourseParser is a mock implementation of interfaces.CourseParser for testing. // MockCourseParser is a mock implementation of interfaces.CourseParser for testing.
type MockCourseParser struct { type MockCourseParser struct {
mockFetchCourse func(ctx context.Context, uri string) (*models.Course, error) mockFetchCourse func(uri string) (*models.Course, error)
mockLoadCourseFromFile func(filePath string) (*models.Course, error) mockLoadCourseFromFile func(filePath string) (*models.Course, error)
} }
func (m *MockCourseParser) FetchCourse(ctx context.Context, uri string) (*models.Course, error) { func (m *MockCourseParser) FetchCourse(uri string) (*models.Course, error) {
if m.mockFetchCourse != nil { if m.mockFetchCourse != nil {
return m.mockFetchCourse(ctx, uri) return m.mockFetchCourse(uri)
} }
return nil, errors.New("not implemented") return nil, errors.New("not implemented")
} }
@ -32,7 +32,7 @@ func (m *MockCourseParser) LoadCourseFromFile(filePath string) (*models.Course,
// MockExporter is a mock implementation of interfaces.Exporter for testing. // MockExporter is a mock implementation of interfaces.Exporter for testing.
type MockExporter struct { type MockExporter struct {
mockExport func(course *models.Course, outputPath string) error mockExport func(course *models.Course, outputPath string) error
mockSupportedFormat func() string mockGetSupportedFormat func() string
} }
func (m *MockExporter) Export(course *models.Course, outputPath string) error { func (m *MockExporter) Export(course *models.Course, outputPath string) error {
@ -42,9 +42,9 @@ func (m *MockExporter) Export(course *models.Course, outputPath string) error {
return nil return nil
} }
func (m *MockExporter) SupportedFormat() string { func (m *MockExporter) GetSupportedFormat() string {
if m.mockSupportedFormat != nil { if m.mockGetSupportedFormat != nil {
return m.mockSupportedFormat() return m.mockGetSupportedFormat()
} }
return "mock" return "mock"
} }
@ -52,7 +52,7 @@ func (m *MockExporter) SupportedFormat() string {
// MockExporterFactory is a mock implementation of interfaces.ExporterFactory for testing. // MockExporterFactory is a mock implementation of interfaces.ExporterFactory for testing.
type MockExporterFactory struct { type MockExporterFactory struct {
mockCreateExporter func(format string) (*MockExporter, error) mockCreateExporter func(format string) (*MockExporter, error)
mockSupportedFormats func() []string mockGetSupportedFormats func() []string
} }
func (m *MockExporterFactory) CreateExporter(format string) (interfaces.Exporter, error) { func (m *MockExporterFactory) CreateExporter(format string) (interfaces.Exporter, error) {
@ -63,9 +63,9 @@ func (m *MockExporterFactory) CreateExporter(format string) (interfaces.Exporter
return &MockExporter{}, nil return &MockExporter{}, nil
} }
func (m *MockExporterFactory) SupportedFormats() []string { func (m *MockExporterFactory) GetSupportedFormats() []string {
if m.mockSupportedFormats != nil { if m.mockGetSupportedFormats != nil {
return m.mockSupportedFormats() return m.mockGetSupportedFormats()
} }
return []string{"mock"} return []string{"mock"}
} }
@ -119,7 +119,7 @@ func TestNewApp(t *testing.T) {
} }
// Test that the factory is set (we can't directly compare interface values) // Test that the factory is set (we can't directly compare interface values)
formats := app.SupportedFormats() formats := app.GetSupportedFormats()
if len(formats) == 0 { if len(formats) == 0 {
t.Error("App exporterFactory was not set correctly - no supported formats") t.Error("App exporterFactory was not set correctly - no supported formats")
} }
@ -216,9 +216,11 @@ func TestApp_ProcessCourseFromFile(t *testing.T) {
if !contains(err.Error(), tt.expectedError) { if !contains(err.Error(), tt.expectedError) {
t.Errorf("Expected error containing '%s', got '%s'", tt.expectedError, err.Error()) t.Errorf("Expected error containing '%s', got '%s'", tt.expectedError, err.Error())
} }
} else if err != nil { } else {
if err != nil {
t.Errorf("Expected no error, got: %v", err) t.Errorf("Expected no error, got: %v", err)
} }
}
}) })
} }
} }
@ -241,7 +243,7 @@ func TestApp_ProcessCourseFromURI(t *testing.T) {
format: "docx", format: "docx",
outputPath: "output.docx", outputPath: "output.docx",
setupMocks: func(parser *MockCourseParser, factory *MockExporterFactory, exporter *MockExporter) { setupMocks: func(parser *MockCourseParser, factory *MockExporterFactory, exporter *MockExporter) {
parser.mockFetchCourse = func(ctx context.Context, uri string) (*models.Course, error) { parser.mockFetchCourse = func(uri string) (*models.Course, error) {
if uri != "https://rise.articulate.com/share/test123" { if uri != "https://rise.articulate.com/share/test123" {
t.Errorf("Expected uri 'https://rise.articulate.com/share/test123', got '%s'", uri) t.Errorf("Expected uri 'https://rise.articulate.com/share/test123', got '%s'", uri)
} }
@ -269,7 +271,7 @@ func TestApp_ProcessCourseFromURI(t *testing.T) {
format: "docx", format: "docx",
outputPath: "output.docx", outputPath: "output.docx",
setupMocks: func(parser *MockCourseParser, factory *MockExporterFactory, exporter *MockExporter) { setupMocks: func(parser *MockCourseParser, factory *MockExporterFactory, exporter *MockExporter) {
parser.mockFetchCourse = func(ctx context.Context, uri string) (*models.Course, error) { parser.mockFetchCourse = func(uri string) (*models.Course, error) {
return nil, errors.New("network error") return nil, errors.New("network error")
} }
}, },
@ -286,7 +288,7 @@ func TestApp_ProcessCourseFromURI(t *testing.T) {
tt.setupMocks(parser, factory, exporter) tt.setupMocks(parser, factory, exporter)
app := NewApp(parser, factory) app := NewApp(parser, factory)
err := app.ProcessCourseFromURI(context.Background(), tt.uri, tt.format, tt.outputPath) err := app.ProcessCourseFromURI(tt.uri, tt.format, tt.outputPath)
if tt.expectedError != "" { if tt.expectedError != "" {
if err == nil { if err == nil {
@ -295,26 +297,28 @@ func TestApp_ProcessCourseFromURI(t *testing.T) {
if !contains(err.Error(), tt.expectedError) { if !contains(err.Error(), tt.expectedError) {
t.Errorf("Expected error containing '%s', got '%s'", tt.expectedError, err.Error()) t.Errorf("Expected error containing '%s', got '%s'", tt.expectedError, err.Error())
} }
} else if err != nil { } else {
if err != nil {
t.Errorf("Expected no error, got: %v", err) t.Errorf("Expected no error, got: %v", err)
} }
}
}) })
} }
} }
// TestApp_SupportedFormats tests the SupportedFormats method. // TestApp_GetSupportedFormats tests the GetSupportedFormats method.
func TestApp_SupportedFormats(t *testing.T) { func TestApp_GetSupportedFormats(t *testing.T) {
expectedFormats := []string{"markdown", "docx", "pdf"} expectedFormats := []string{"markdown", "docx", "pdf"}
parser := &MockCourseParser{} parser := &MockCourseParser{}
factory := &MockExporterFactory{ factory := &MockExporterFactory{
mockSupportedFormats: func() []string { mockGetSupportedFormats: func() []string {
return expectedFormats return expectedFormats
}, },
} }
app := NewApp(parser, factory) app := NewApp(parser, factory)
formats := app.SupportedFormats() formats := app.GetSupportedFormats()
if len(formats) != len(expectedFormats) { if len(formats) != len(expectedFormats) {
t.Errorf("Expected %d formats, got %d", len(expectedFormats), len(formats)) t.Errorf("Expected %d formats, got %d", len(expectedFormats), len(formats))
@ -330,7 +334,7 @@ func TestApp_SupportedFormats(t *testing.T) {
// contains checks if a string contains a substring. // contains checks if a string contains a substring.
func contains(s, substr string) bool { func contains(s, substr string) bool {
return len(s) >= len(substr) && return len(s) >= len(substr) &&
(substr == "" || (len(substr) == 0 ||
s == substr || s == substr ||
(len(s) > len(substr) && (len(s) > len(substr) &&
(s[:len(substr)] == substr || (s[:len(substr)] == substr ||

View File

@ -1,96 +0,0 @@
// Package services_test provides examples for the services package.
package services_test
import (
"context"
"fmt"
"log"
"github.com/kjanat/articulate-parser/internal/services"
)
// ExampleNewArticulateParser demonstrates creating a new parser.
func ExampleNewArticulateParser() {
// Create a no-op logger for this example
logger := services.NewNoOpLogger()
// Create parser with defaults
parser := services.NewArticulateParser(logger, "", 0)
fmt.Printf("Parser created: %T\n", parser)
// Output: Parser created: *services.ArticulateParser
}
// ExampleNewArticulateParser_custom demonstrates creating a parser with custom configuration.
func ExampleNewArticulateParser_custom() {
logger := services.NewNoOpLogger()
// Create parser with custom base URL and timeout
parser := services.NewArticulateParser(
logger,
"https://custom.articulate.com",
60_000_000_000, // 60 seconds in nanoseconds
)
fmt.Printf("Parser configured: %T\n", parser)
// Output: Parser configured: *services.ArticulateParser
}
// ExampleArticulateParser_LoadCourseFromFile demonstrates loading a course from a file.
func ExampleArticulateParser_LoadCourseFromFile() {
logger := services.NewNoOpLogger()
parser := services.NewArticulateParser(logger, "", 0)
// In a real scenario, you'd have an actual file
// This example shows the API usage
_, err := parser.LoadCourseFromFile("course.json")
if err != nil {
log.Printf("Failed to load course: %v", err)
}
}
// ExampleArticulateParser_FetchCourse demonstrates fetching a course from a URI.
func ExampleArticulateParser_FetchCourse() {
logger := services.NewNoOpLogger()
parser := services.NewArticulateParser(logger, "", 0)
// Create a context with timeout
ctx := context.Background()
// In a real scenario, you'd use an actual share URL
_, err := parser.FetchCourse(ctx, "https://rise.articulate.com/share/YOUR_SHARE_ID")
if err != nil {
log.Printf("Failed to fetch course: %v", err)
}
}
// ExampleHTMLCleaner demonstrates cleaning HTML content.
func ExampleHTMLCleaner() {
cleaner := services.NewHTMLCleaner()
html := "<p>This is <strong>bold</strong> text with entities.</p>"
clean := cleaner.CleanHTML(html)
fmt.Println(clean)
// Output: This is bold text with entities.
}
// ExampleHTMLCleaner_CleanHTML demonstrates complex HTML cleaning.
func ExampleHTMLCleaner_CleanHTML() {
cleaner := services.NewHTMLCleaner()
html := `
<div>
<h1>Title</h1>
<p>Paragraph with <a href="#">link</a> and &amp; entity.</p>
<ul>
<li>Item 1</li>
<li>Item 2</li>
</ul>
</div>
`
clean := cleaner.CleanHTML(html)
fmt.Println(clean)
// Output: Title Paragraph with link and & entity. Item 1 Item 2
}

View File

@@ -1,17 +1,15 @@
+// Package services provides the core functionality for the articulate-parser application.
+// It implements the interfaces defined in the interfaces package.
 package services
 import (
-	"bytes"
-	stdhtml "html"
-	"io"
+	"regexp"
 	"strings"
-	"golang.org/x/net/html"
 )
 // HTMLCleaner provides utilities for converting HTML content to plain text.
 // It removes HTML tags while preserving their content and converts HTML entities
-// to their plain text equivalents using proper HTML parsing instead of regex.
+// to their plain text equivalents.
 type HTMLCleaner struct{}
 // NewHTMLCleaner creates a new HTML cleaner instance.
@@ -22,47 +20,34 @@ func NewHTMLCleaner() *HTMLCleaner {
 }
 // CleanHTML removes HTML tags and converts entities, returning clean plain text.
-// It parses the HTML into a node tree and extracts only text content,
-// skipping script and style tags. HTML entities are automatically handled
-// by the parser, and whitespace is normalized.
-func (h *HTMLCleaner) CleanHTML(htmlStr string) string {
-	// Parse the HTML into a node tree
-	doc, err := html.Parse(strings.NewReader(htmlStr))
-	if err != nil {
-		// If parsing fails, return empty string
-		// This maintains backward compatibility with the test expectations
-		return ""
-	}
-	// Extract text content from the node tree
-	var buf bytes.Buffer
-	extractText(&buf, doc)
-	// Unescape any remaining HTML entities
-	unescaped := stdhtml.UnescapeString(buf.String())
-	// Normalize whitespace: replace multiple spaces, tabs, and newlines with a single space
-	cleaned := strings.Join(strings.Fields(unescaped), " ")
-	return strings.TrimSpace(cleaned)
-}
-// extractText recursively traverses the HTML node tree and extracts text content.
-// It skips script and style tags to avoid including their content in the output.
-func extractText(w io.Writer, n *html.Node) {
-	// Skip script and style tags entirely
-	if n.Type == html.ElementNode && (n.Data == "script" || n.Data == "style") {
-		return
-	}
-	// If this is a text node, write its content
-	if n.Type == html.TextNode {
-		// Write errors are ignored because we're writing to an in-memory buffer
-		// which cannot fail in normal circumstances
-		_, _ = w.Write([]byte(n.Data))
-	}
-	// Recursively process all child nodes
-	for c := n.FirstChild; c != nil; c = c.NextSibling {
-		extractText(w, c)
-	}
+// The function preserves the textual content of the HTML while removing markup.
+// It handles common HTML entities like &nbsp;, &amp;, etc., and normalizes whitespace.
+//
+// Parameters:
+//   - html: The HTML content to clean
+//
+// Returns:
+//   - A plain text string with all HTML elements and entities removed/converted
+func (h *HTMLCleaner) CleanHTML(html string) string {
+	// Remove HTML tags but preserve content
+	re := regexp.MustCompile(`<[^>]*>`)
+	cleaned := re.ReplaceAllString(html, "")
+	// Replace common HTML entities with their character equivalents
+	cleaned = strings.ReplaceAll(cleaned, "&nbsp;", " ")
+	cleaned = strings.ReplaceAll(cleaned, "&amp;", "&")
+	cleaned = strings.ReplaceAll(cleaned, "&lt;", "<")
+	cleaned = strings.ReplaceAll(cleaned, "&gt;", ">")
+	cleaned = strings.ReplaceAll(cleaned, "&quot;", "\"")
+	cleaned = strings.ReplaceAll(cleaned, "&#39;", "'")
+	cleaned = strings.ReplaceAll(cleaned, "&iuml;", "ï")
+	cleaned = strings.ReplaceAll(cleaned, "&euml;", "ë")
+	cleaned = strings.ReplaceAll(cleaned, "&eacute;", "é")
+	// Clean up extra whitespace by replacing multiple spaces, tabs, and newlines
+	// with a single space, then trim any leading/trailing whitespace
+	cleaned = regexp.MustCompile(`\s+`).ReplaceAllString(cleaned, " ")
+	cleaned = strings.TrimSpace(cleaned)
+	return cleaned
 }
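
For readers skimming the diff, here is a minimal, self-contained sketch of the regexp-based strategy the incoming CleanHTML uses (strip tags, map a fixed entity table, collapse whitespace). The helper name stripHTML and the use of strings.NewReplacer are illustrative, not part of the repository:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// stripHTML mirrors the strategy in the hunk above: drop tags, translate a
// fixed set of entities, then normalize whitespace.
func stripHTML(input string) string {
	// Remove anything that looks like a tag.
	cleaned := regexp.MustCompile(`<[^>]*>`).ReplaceAllString(input, "")
	// Translate a small, fixed entity table.
	replacer := strings.NewReplacer(
		"&nbsp;", " ", "&amp;", "&", "&lt;", "<", "&gt;", ">",
		"&quot;", "\"", "&#39;", "'",
	)
	cleaned = replacer.Replace(cleaned)
	// Collapse runs of whitespace and trim.
	cleaned = regexp.MustCompile(`\s+`).ReplaceAllString(cleaned, " ")
	return strings.TrimSpace(cleaned)
}

func main() {
	fmt.Println(stripHTML("<p>This is <strong>bold</strong> &amp; clean.</p>"))
	// Prints: This is bold & clean.
}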

View File

@@ -1,3 +1,4 @@
+// Package services_test provides tests for the HTML cleaner service.
 package services
 import (
@@ -111,7 +112,7 @@ func TestHTMLCleaner_CleanHTML(t *testing.T) {
 		{
 			name:     "script and style tags content",
 			input:    "<script>alert('test');</script>Content<style>body{color:red;}</style>",
-			expected: "Content", // Script and style tags are correctly skipped
+			expected: "alert('test');Contentbody{color:red;}",
 		},
 		{
 			name: "line breaks and formatting",
@@ -146,7 +147,7 @@ func TestHTMLCleaner_CleanHTML(t *testing.T) {
 		{
 			name:     "special HTML5 entities",
 			input:    "Left arrow &larr; Right arrow &rarr;",
-			expected: "Left arrow Right arrow ", // HTML5 entities are properly handled by the parser
+			expected: "Left arrow &larr; Right arrow &rarr;", // These are not handled by the cleaner
 		},
 	}
@@ -167,7 +168,7 @@ func TestHTMLCleaner_CleanHTML_LargeContent(t *testing.T) {
 	// Create a large HTML string
 	var builder strings.Builder
 	builder.WriteString("<html><body>")
-	for i := range 1000 {
+	for i := 0; i < 1000; i++ {
 		builder.WriteString("<p>Paragraph ")
 		builder.WriteString(string(rune('0' + i%10)))
 		builder.WriteString(" with some content &amp; entities.</p>")
@@ -216,9 +217,9 @@ func TestHTMLCleaner_CleanHTML_EdgeCases(t *testing.T) {
 			expected: "&&&",
 		},
 		{
-			name:     "entities without semicolon (properly converted)",
+			name:     "entities without semicolon (should not be converted)",
 			input:    "&amp test &lt test",
-			expected: "& test < test", // Parser handles entities even without semicolons in some cases
+			expected: "&amp test &lt test",
 		},
 		{
 			name: "mixed valid and invalid entities",
@@ -233,7 +234,7 @@ func TestHTMLCleaner_CleanHTML_EdgeCases(t *testing.T) {
 		{
 			name:     "tag with no closing bracket",
 			input:    "Content <p class='test' with no closing bracket",
-			expected: "Content", // Parser handles malformed HTML gracefully
+			expected: "Content <p class='test' with no closing bracket",
 		},
 		{
 			name: "extremely nested tags",
@@ -298,7 +299,8 @@ func BenchmarkHTMLCleaner_CleanHTML(b *testing.B) {
 	cleaner := NewHTMLCleaner()
 	input := "<div class=\"content\"><h1>Course Title</h1><p>This is a <em>great</em> course about &amp; HTML entities like &nbsp; and &quot;quotes&quot;.</p><ul><li>Item 1</li><li>Item 2</li></ul></div>"
-	for b.Loop() {
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
 		cleaner.CleanHTML(input)
 	}
 }
@@ -309,14 +311,15 @@ func BenchmarkHTMLCleaner_CleanHTML_Large(b *testing.B) {
 	// Create a large HTML string
 	var builder strings.Builder
-	for i := range 100 {
+	for i := 0; i < 100; i++ {
 		builder.WriteString("<p>Paragraph ")
 		builder.WriteString(string(rune('0' + i%10)))
 		builder.WriteString(" with some content &amp; entities &lt;test&gt;.</p>")
 	}
 	input := builder.String()
-	for b.Loop() {
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
 		cleaner.CleanHTML(input)
 	}
 }
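
Several hunks in this file (and in the test and benchmark files below) replace Go 1.24's b.Loop() with the classic b.N loop. A minimal sketch of the two forms for comparison; the benchmark names and input string are illustrative only:

package services

import "testing"

// Classic form, as used on the incoming side of this diff: reset the timer
// after setup, then iterate b.N times.
func BenchmarkCleanHTML_ClassicLoop(b *testing.B) {
	cleaner := NewHTMLCleaner()
	input := "<p>hello &amp; goodbye</p>"
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cleaner.CleanHTML(input)
	}
}

// b.Loop() form, as used on the outgoing side: requires Go 1.24+ and handles
// timer management automatically.
func BenchmarkCleanHTML_LoopMethod(b *testing.B) {
	cleaner := NewHTMLCleaner()
	input := "<p>hello &amp; goodbye</p>"
	for b.Loop() {
		cleaner.CleanHTML(input)
	}
}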

View File

@@ -1,104 +0,0 @@
package services
import (
"context"
"log/slog"
"os"
"github.com/kjanat/articulate-parser/internal/interfaces"
)
// SlogLogger implements the Logger interface using the standard library's slog package.
type SlogLogger struct {
logger *slog.Logger
}
// NewSlogLogger creates a new structured logger using slog.
// The level parameter controls the minimum log level (debug, info, warn, error).
func NewSlogLogger(level slog.Level) interfaces.Logger {
opts := &slog.HandlerOptions{
Level: level,
}
handler := slog.NewJSONHandler(os.Stdout, opts)
return &SlogLogger{
logger: slog.New(handler),
}
}
// NewTextLogger creates a new structured logger with human-readable text output.
// Useful for development and debugging.
func NewTextLogger(level slog.Level) interfaces.Logger {
opts := &slog.HandlerOptions{
Level: level,
}
handler := slog.NewTextHandler(os.Stdout, opts)
return &SlogLogger{
logger: slog.New(handler),
}
}
// Debug logs a debug-level message with optional key-value pairs.
func (l *SlogLogger) Debug(msg string, keysAndValues ...any) {
l.logger.Debug(msg, keysAndValues...)
}
// Info logs an info-level message with optional key-value pairs.
func (l *SlogLogger) Info(msg string, keysAndValues ...any) {
l.logger.Info(msg, keysAndValues...)
}
// Warn logs a warning-level message with optional key-value pairs.
func (l *SlogLogger) Warn(msg string, keysAndValues ...any) {
l.logger.Warn(msg, keysAndValues...)
}
// Error logs an error-level message with optional key-value pairs.
func (l *SlogLogger) Error(msg string, keysAndValues ...any) {
l.logger.Error(msg, keysAndValues...)
}
// With returns a new logger with the given key-value pairs added as context.
func (l *SlogLogger) With(keysAndValues ...any) interfaces.Logger {
return &SlogLogger{
logger: l.logger.With(keysAndValues...),
}
}
// WithContext returns a new logger with context information.
// Currently preserves the logger as-is, but can be extended to extract
// trace IDs or other context values in the future.
func (l *SlogLogger) WithContext(ctx context.Context) interfaces.Logger {
// Can be extended to extract trace IDs, request IDs, etc. from context
return l
}
// NoOpLogger is a logger that discards all log messages.
// Useful for testing or when logging should be disabled.
type NoOpLogger struct{}
// NewNoOpLogger creates a logger that discards all messages.
func NewNoOpLogger() interfaces.Logger {
return &NoOpLogger{}
}
// Debug does nothing.
func (l *NoOpLogger) Debug(msg string, keysAndValues ...any) {}
// Info does nothing.
func (l *NoOpLogger) Info(msg string, keysAndValues ...any) {}
// Warn does nothing.
func (l *NoOpLogger) Warn(msg string, keysAndValues ...any) {}
// Error does nothing.
func (l *NoOpLogger) Error(msg string, keysAndValues ...any) {}
// With returns the same no-op logger.
func (l *NoOpLogger) With(keysAndValues ...any) interfaces.Logger {
return l
}
// WithContext returns the same no-op logger.
func (l *NoOpLogger) WithContext(ctx context.Context) interfaces.Logger {
return l
}
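
For context on what is being deleted here, a minimal usage sketch of the slog-backed logger defined above; the key/value pairs are illustrative only:

package main

import (
	"log/slog"

	"github.com/kjanat/articulate-parser/internal/services"
)

func main() {
	// JSON logger at Info level, as provided by the removed constructor.
	logger := services.NewSlogLogger(slog.LevelInfo)

	// Structured key-value pairs, in the style the Logger interface expects.
	logger.Info("course fetched", "shareID", "abc123", "lessons", 12)

	// Derive a child logger that carries request-scoped context.
	reqLogger := logger.With("request_id", "42")
	reqLogger.Debug("suppressed at Info level") // not emitted at LevelInfo
	reqLogger.Error("export failed", "format", "docx")
}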

View File

@@ -1,95 +0,0 @@
package services
import (
"context"
"io"
"log/slog"
"testing"
)
// BenchmarkSlogLogger_Info benchmarks structured JSON logging.
func BenchmarkSlogLogger_Info(b *testing.B) {
// Create logger that writes to io.Discard to avoid benchmark noise
opts := &slog.HandlerOptions{Level: slog.LevelInfo}
handler := slog.NewJSONHandler(io.Discard, opts)
logger := &SlogLogger{logger: slog.New(handler)}
b.ResetTimer()
for b.Loop() {
logger.Info("test message", "key1", "value1", "key2", 42, "key3", true)
}
}
// BenchmarkSlogLogger_Debug benchmarks debug level logging.
func BenchmarkSlogLogger_Debug(b *testing.B) {
opts := &slog.HandlerOptions{Level: slog.LevelDebug}
handler := slog.NewJSONHandler(io.Discard, opts)
logger := &SlogLogger{logger: slog.New(handler)}
b.ResetTimer()
for b.Loop() {
logger.Debug("debug message", "operation", "test", "duration", 123)
}
}
// BenchmarkSlogLogger_Error benchmarks error logging.
func BenchmarkSlogLogger_Error(b *testing.B) {
opts := &slog.HandlerOptions{Level: slog.LevelError}
handler := slog.NewJSONHandler(io.Discard, opts)
logger := &SlogLogger{logger: slog.New(handler)}
b.ResetTimer()
for b.Loop() {
logger.Error("error occurred", "error", "test error", "code", 500)
}
}
// BenchmarkTextLogger_Info benchmarks text logging.
func BenchmarkTextLogger_Info(b *testing.B) {
opts := &slog.HandlerOptions{Level: slog.LevelInfo}
handler := slog.NewTextHandler(io.Discard, opts)
logger := &SlogLogger{logger: slog.New(handler)}
b.ResetTimer()
for b.Loop() {
logger.Info("test message", "key1", "value1", "key2", 42)
}
}
// BenchmarkNoOpLogger benchmarks the no-op logger.
func BenchmarkNoOpLogger(b *testing.B) {
logger := NewNoOpLogger()
b.ResetTimer()
for b.Loop() {
logger.Info("test message", "key1", "value1", "key2", 42)
logger.Error("error message", "error", "test")
}
}
// BenchmarkLogger_With benchmarks logger with context.
func BenchmarkLogger_With(b *testing.B) {
opts := &slog.HandlerOptions{Level: slog.LevelInfo}
handler := slog.NewJSONHandler(io.Discard, opts)
logger := &SlogLogger{logger: slog.New(handler)}
b.ResetTimer()
for b.Loop() {
contextLogger := logger.With("request_id", "123", "user_id", "456")
contextLogger.Info("operation completed")
}
}
// BenchmarkLogger_WithContext benchmarks logger with Go context.
func BenchmarkLogger_WithContext(b *testing.B) {
opts := &slog.HandlerOptions{Level: slog.LevelInfo}
handler := slog.NewJSONHandler(io.Discard, opts)
logger := &SlogLogger{logger: slog.New(handler)}
ctx := context.Background()
b.ResetTimer()
for b.Loop() {
contextLogger := logger.WithContext(ctx)
contextLogger.Info("context operation")
}
}

View File

@@ -1,7 +1,8 @@
+// Package services provides the core functionality for the articulate-parser application.
+// It implements the interfaces defined in the interfaces package.
 package services
 import (
-	"context"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -22,36 +23,32 @@ type ArticulateParser struct {
 	BaseURL string
 	// Client is the HTTP client used to make requests to the API
 	Client *http.Client
-	// Logger for structured logging
-	Logger interfaces.Logger
 }
-// NewArticulateParser creates a new ArticulateParser instance.
-// If baseURL is empty, uses the default Articulate Rise API URL.
-// If timeout is zero, uses a 30-second timeout.
-func NewArticulateParser(logger interfaces.Logger, baseURL string, timeout time.Duration) interfaces.CourseParser {
-	if logger == nil {
-		logger = NewNoOpLogger()
-	}
-	if baseURL == "" {
-		baseURL = "https://rise.articulate.com"
-	}
-	if timeout == 0 {
-		timeout = 30 * time.Second
-	}
+// NewArticulateParser creates a new ArticulateParser instance with default settings.
+// The default configuration uses the standard Articulate Rise API URL and a
+// HTTP client with a 30-second timeout.
+func NewArticulateParser() interfaces.CourseParser {
 	return &ArticulateParser{
-		BaseURL: baseURL,
+		BaseURL: "https://rise.articulate.com",
 		Client: &http.Client{
-			Timeout: timeout,
+			Timeout: 30 * time.Second,
 		},
-		Logger: logger,
 	}
 }
-// FetchCourse fetches a course from the given URI and returns the parsed course data.
-// The URI should be an Articulate Rise share URL (e.g., https://rise.articulate.com/share/SHARE_ID).
-// The context can be used for cancellation and timeout control.
-func (p *ArticulateParser) FetchCourse(ctx context.Context, uri string) (*models.Course, error) {
+// FetchCourse fetches a course from the given URI.
+// It extracts the share ID from the URI, constructs an API URL, and fetches the course data.
+// The course data is then unmarshalled into a Course model.
+//
+// Parameters:
+//   - uri: The Articulate Rise share URL (e.g., https://rise.articulate.com/share/SHARE_ID)
+//
+// Returns:
+//   - A parsed Course model if successful
+//   - An error if the fetch fails, if the share ID can't be extracted,
+//     or if the response can't be parsed
+func (p *ArticulateParser) FetchCourse(uri string) (*models.Course, error) {
 	shareID, err := p.extractShareID(uri)
 	if err != nil {
 		return nil, err
@@ -59,24 +56,11 @@ func (p *ArticulateParser) FetchCourse(ctx context.Context, uri string) (*models.Course, error) {
 	apiURL := p.buildAPIURL(shareID)
-	req, err := http.NewRequestWithContext(ctx, http.MethodGet, apiURL, http.NoBody)
-	if err != nil {
-		return nil, fmt.Errorf("failed to create request: %w", err)
-	}
-	resp, err := p.Client.Do(req)
+	resp, err := p.Client.Get(apiURL)
 	if err != nil {
 		return nil, fmt.Errorf("failed to fetch course data: %w", err)
 	}
-	// Ensure response body is closed even if ReadAll fails. Close errors are logged
-	// but not fatal since the body content has already been read and parsed. In the
-	// context of HTTP responses, the body must be closed to release the underlying
-	// connection, but a close error doesn't invalidate the data already consumed.
-	defer func() {
-		if err := resp.Body.Close(); err != nil {
-			p.Logger.Warn("failed to close response body", "error", err, "url", apiURL)
-		}
-	}()
+	defer resp.Body.Close()
 	if resp.StatusCode != http.StatusOK {
 		return nil, fmt.Errorf("API returned status %d", resp.StatusCode)
@@ -96,8 +80,15 @@ func (p *ArticulateParser) FetchCourse(ctx context.Context, uri string) (*models.Course, error) {
 }
 // LoadCourseFromFile loads an Articulate Rise course from a local JSON file.
+// The file should contain a valid JSON representation of an Articulate Rise course.
+//
+// Parameters:
+//   - filePath: The path to the JSON file containing the course data
+//
+// Returns:
+//   - A parsed Course model if successful
+//   - An error if the file can't be read or the JSON can't be parsed
 func (p *ArticulateParser) LoadCourseFromFile(filePath string) (*models.Course, error) {
-	// #nosec G304 - File path is provided by user via CLI argument, which is expected behavior
 	data, err := os.ReadFile(filePath)
 	if err != nil {
 		return nil, fmt.Errorf("failed to read file: %w", err)

View File

@@ -1,219 +0,0 @@
package services
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"testing"
"github.com/kjanat/articulate-parser/internal/models"
)
// BenchmarkArticulateParser_FetchCourse benchmarks the FetchCourse method.
func BenchmarkArticulateParser_FetchCourse(b *testing.B) {
testCourse := &models.Course{
ShareID: "benchmark-id",
Author: "Benchmark Author",
Course: models.CourseInfo{
ID: "bench-course",
Title: "Benchmark Course",
Description: "Testing performance",
Lessons: []models.Lesson{
{
ID: "lesson1",
Title: "Lesson 1",
Type: "lesson",
},
},
},
}
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
// Encode errors are ignored in benchmarks; the test server's ResponseWriter
// writes are reliable and any encoding error would be a test setup issue
_ = json.NewEncoder(w).Encode(testCourse)
}))
defer server.Close()
parser := &ArticulateParser{
BaseURL: server.URL,
Client: &http.Client{},
Logger: NewNoOpLogger(),
}
b.ResetTimer()
for b.Loop() {
_, err := parser.FetchCourse(context.Background(), "https://rise.articulate.com/share/benchmark-id")
if err != nil {
b.Fatalf("FetchCourse failed: %v", err)
}
}
}
// BenchmarkArticulateParser_FetchCourse_LargeCourse benchmarks with a large course.
func BenchmarkArticulateParser_FetchCourse_LargeCourse(b *testing.B) {
// Create a large course with many lessons
lessons := make([]models.Lesson, 100)
for i := range 100 {
lessons[i] = models.Lesson{
ID: string(rune(i)),
Title: "Lesson " + string(rune(i)),
Type: "lesson",
Description: "This is a test lesson with some description",
Items: []models.Item{
{
Type: "text",
Items: []models.SubItem{
{
Heading: "Test Heading",
Paragraph: "Test paragraph content with some text",
},
},
},
},
}
}
testCourse := &models.Course{
ShareID: "large-course-id",
Author: "Benchmark Author",
Course: models.CourseInfo{
ID: "large-course",
Title: "Large Benchmark Course",
Description: "Testing performance with large course",
Lessons: lessons,
},
}
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
// Encode errors are ignored in benchmarks; the test server's ResponseWriter
// writes are reliable and any encoding error would be a test setup issue
_ = json.NewEncoder(w).Encode(testCourse)
}))
defer server.Close()
parser := &ArticulateParser{
BaseURL: server.URL,
Client: &http.Client{},
Logger: NewNoOpLogger(),
}
b.ResetTimer()
for b.Loop() {
_, err := parser.FetchCourse(context.Background(), "https://rise.articulate.com/share/large-course-id")
if err != nil {
b.Fatalf("FetchCourse failed: %v", err)
}
}
}
// BenchmarkArticulateParser_LoadCourseFromFile benchmarks loading from file.
func BenchmarkArticulateParser_LoadCourseFromFile(b *testing.B) {
testCourse := &models.Course{
ShareID: "file-test-id",
Course: models.CourseInfo{
Title: "File Test Course",
},
}
tempDir := b.TempDir()
tempFile := filepath.Join(tempDir, "benchmark.json")
data, err := json.Marshal(testCourse)
if err != nil {
b.Fatalf("Failed to marshal: %v", err)
}
if err := os.WriteFile(tempFile, data, 0o644); err != nil {
b.Fatalf("Failed to write file: %v", err)
}
parser := NewArticulateParser(nil, "", 0)
b.ResetTimer()
for b.Loop() {
_, err := parser.LoadCourseFromFile(tempFile)
if err != nil {
b.Fatalf("LoadCourseFromFile failed: %v", err)
}
}
}
// BenchmarkArticulateParser_LoadCourseFromFile_Large benchmarks with large file.
func BenchmarkArticulateParser_LoadCourseFromFile_Large(b *testing.B) {
// Create a large course
lessons := make([]models.Lesson, 200)
for i := range 200 {
lessons[i] = models.Lesson{
ID: string(rune(i)),
Title: "Lesson " + string(rune(i)),
Type: "lesson",
Items: []models.Item{
{Type: "text", Items: []models.SubItem{{Heading: "H", Paragraph: "P"}}},
{Type: "list", Items: []models.SubItem{{Paragraph: "Item 1"}, {Paragraph: "Item 2"}}},
},
}
}
testCourse := &models.Course{
ShareID: "large-file-id",
Course: models.CourseInfo{
Title: "Large File Course",
Lessons: lessons,
},
}
tempDir := b.TempDir()
tempFile := filepath.Join(tempDir, "large-benchmark.json")
data, err := json.Marshal(testCourse)
if err != nil {
b.Fatalf("Failed to marshal: %v", err)
}
if err := os.WriteFile(tempFile, data, 0o644); err != nil {
b.Fatalf("Failed to write file: %v", err)
}
parser := NewArticulateParser(nil, "", 0)
b.ResetTimer()
for b.Loop() {
_, err := parser.LoadCourseFromFile(tempFile)
if err != nil {
b.Fatalf("LoadCourseFromFile failed: %v", err)
}
}
}
// BenchmarkArticulateParser_ExtractShareID benchmarks share ID extraction.
func BenchmarkArticulateParser_ExtractShareID(b *testing.B) {
parser := &ArticulateParser{}
uri := "https://rise.articulate.com/share/N_APNg40Vr2CSH2xNz-ZLATM5kNviDIO#/"
b.ResetTimer()
for b.Loop() {
_, err := parser.extractShareID(uri)
if err != nil {
b.Fatalf("extractShareID failed: %v", err)
}
}
}
// BenchmarkArticulateParser_BuildAPIURL benchmarks API URL building.
func BenchmarkArticulateParser_BuildAPIURL(b *testing.B) {
parser := &ArticulateParser{
BaseURL: "https://rise.articulate.com",
}
shareID := "test-share-id-12345"
b.ResetTimer()
for b.Loop() {
_ = parser.buildAPIURL(shareID)
}
}

View File

@@ -1,289 +0,0 @@
package services
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"github.com/kjanat/articulate-parser/internal/models"
)
// TestArticulateParser_FetchCourse_ContextCancellation tests that FetchCourse
// respects context cancellation.
func TestArticulateParser_FetchCourse_ContextCancellation(t *testing.T) {
// Create a server that delays response
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Sleep to give time for context cancellation
time.Sleep(100 * time.Millisecond)
testCourse := &models.Course{
ShareID: "test-id",
Course: models.CourseInfo{
Title: "Test Course",
},
}
// Encode errors are ignored in test setup; httptest.ResponseWriter is reliable
_ = json.NewEncoder(w).Encode(testCourse)
}))
defer server.Close()
parser := &ArticulateParser{
BaseURL: server.URL,
Client: &http.Client{
Timeout: 5 * time.Second,
},
Logger: NewNoOpLogger(),
}
// Create a context that we'll cancel immediately
ctx, cancel := context.WithCancel(context.Background())
cancel() // Cancel immediately
_, err := parser.FetchCourse(ctx, "https://rise.articulate.com/share/test-id")
// Should get a context cancellation error
if err == nil {
t.Fatal("Expected error due to context cancellation, got nil")
}
if !strings.Contains(err.Error(), "context canceled") {
t.Errorf("Expected context cancellation error, got: %v", err)
}
}
// TestArticulateParser_FetchCourse_ContextTimeout tests that FetchCourse
// respects context timeout.
func TestArticulateParser_FetchCourse_ContextTimeout(t *testing.T) {
// Create a server that delays response longer than timeout
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Sleep longer than the context timeout
time.Sleep(200 * time.Millisecond)
testCourse := &models.Course{
ShareID: "test-id",
Course: models.CourseInfo{
Title: "Test Course",
},
}
// Encode errors are ignored in test setup; httptest.ResponseWriter is reliable
_ = json.NewEncoder(w).Encode(testCourse)
}))
defer server.Close()
parser := &ArticulateParser{
BaseURL: server.URL,
Client: &http.Client{
Timeout: 5 * time.Second,
},
Logger: NewNoOpLogger(),
}
// Create a context with a very short timeout
ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
defer cancel()
_, err := parser.FetchCourse(ctx, "https://rise.articulate.com/share/test-id")
// Should get a context deadline exceeded error
if err == nil {
t.Fatal("Expected error due to context timeout, got nil")
}
if !strings.Contains(err.Error(), "deadline exceeded") &&
!strings.Contains(err.Error(), "context deadline exceeded") {
t.Errorf("Expected context timeout error, got: %v", err)
}
}
// TestArticulateParser_FetchCourse_ContextDeadline tests that FetchCourse
// respects context deadline.
func TestArticulateParser_FetchCourse_ContextDeadline(t *testing.T) {
// Create a server that delays response
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
time.Sleep(150 * time.Millisecond)
testCourse := &models.Course{
ShareID: "test-id",
Course: models.CourseInfo{
Title: "Test Course",
},
}
// Encode errors are ignored in test setup; httptest.ResponseWriter is reliable
_ = json.NewEncoder(w).Encode(testCourse)
}))
defer server.Close()
parser := &ArticulateParser{
BaseURL: server.URL,
Client: &http.Client{
Timeout: 5 * time.Second,
},
Logger: NewNoOpLogger(),
}
// Create a context with a deadline in the past
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Millisecond))
defer cancel()
_, err := parser.FetchCourse(ctx, "https://rise.articulate.com/share/test-id")
// Should get a deadline exceeded error
if err == nil {
t.Fatal("Expected error due to context deadline, got nil")
}
if !strings.Contains(err.Error(), "deadline exceeded") &&
!strings.Contains(err.Error(), "context deadline exceeded") {
t.Errorf("Expected deadline exceeded error, got: %v", err)
}
}
// TestArticulateParser_FetchCourse_ContextSuccess tests that FetchCourse
// succeeds when context is not canceled.
func TestArticulateParser_FetchCourse_ContextSuccess(t *testing.T) {
testCourse := &models.Course{
ShareID: "test-id",
Course: models.CourseInfo{
Title: "Test Course",
},
}
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Respond quickly
// Encode errors are ignored in test setup; httptest.ResponseWriter is reliable
_ = json.NewEncoder(w).Encode(testCourse)
}))
defer server.Close()
parser := &ArticulateParser{
BaseURL: server.URL,
Client: &http.Client{
Timeout: 5 * time.Second,
},
Logger: NewNoOpLogger(),
}
// Create a context with generous timeout
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
course, err := parser.FetchCourse(ctx, "https://rise.articulate.com/share/test-id")
if err != nil {
t.Fatalf("Expected no error, got: %v", err)
}
if course == nil {
t.Fatal("Expected course, got nil")
}
if course.Course.Title != testCourse.Course.Title {
t.Errorf("Expected title '%s', got '%s'", testCourse.Course.Title, course.Course.Title)
}
}
// TestArticulateParser_FetchCourse_CancellationDuringRequest tests cancellation
// during an in-flight request.
func TestArticulateParser_FetchCourse_CancellationDuringRequest(t *testing.T) {
requestStarted := make(chan bool)
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
requestStarted <- true
// Keep the handler running to simulate slow response
time.Sleep(300 * time.Millisecond)
testCourse := &models.Course{
ShareID: "test-id",
}
// Encode errors are ignored in test setup; httptest.ResponseWriter is reliable
_ = json.NewEncoder(w).Encode(testCourse)
}))
defer server.Close()
parser := &ArticulateParser{
BaseURL: server.URL,
Client: &http.Client{
Timeout: 5 * time.Second,
},
Logger: NewNoOpLogger(),
}
ctx, cancel := context.WithCancel(context.Background())
// Start the request in a goroutine
errChan := make(chan error, 1)
go func() {
_, err := parser.FetchCourse(ctx, "https://rise.articulate.com/share/test-id")
errChan <- err
}()
// Wait for request to start
<-requestStarted
// Cancel after request has started
cancel()
// Get the error
err := <-errChan
if err == nil {
t.Fatal("Expected error due to context cancellation, got nil")
}
// Should contain context canceled somewhere in the error chain
if !strings.Contains(err.Error(), "context canceled") {
t.Errorf("Expected context canceled error, got: %v", err)
}
}
// TestArticulateParser_FetchCourse_MultipleTimeouts tests behavior with
// multiple concurrent requests and timeouts.
func TestArticulateParser_FetchCourse_MultipleTimeouts(t *testing.T) {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
time.Sleep(100 * time.Millisecond)
testCourse := &models.Course{ShareID: "test"}
// Encode errors are ignored in test setup; httptest.ResponseWriter is reliable
_ = json.NewEncoder(w).Encode(testCourse)
}))
defer server.Close()
parser := &ArticulateParser{
BaseURL: server.URL,
Client: &http.Client{
Timeout: 5 * time.Second,
},
Logger: NewNoOpLogger(),
}
// Launch multiple requests with different timeouts
tests := []struct {
name string
timeout time.Duration
shouldSucceed bool
}{
{"very short timeout", 10 * time.Millisecond, false},
{"short timeout", 50 * time.Millisecond, false},
{"adequate timeout", 500 * time.Millisecond, true},
{"long timeout", 2 * time.Second, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), tt.timeout)
defer cancel()
_, err := parser.FetchCourse(ctx, "https://rise.articulate.com/share/test-id")
if tt.shouldSucceed && err != nil {
t.Errorf("Expected success with timeout %v, got error: %v", tt.timeout, err)
}
if !tt.shouldSucceed && err == nil {
t.Errorf("Expected timeout error with timeout %v, got success", tt.timeout)
}
})
}
}
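
These deleted tests all exercise one pattern: binding the HTTP request to a caller-supplied context so an in-flight fetch can be canceled or timed out. A standalone sketch of that pattern, assuming a placeholder URL:

package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Bind the request to a context so the caller controls cancellation/timeout.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodGet,
		"https://rise.articulate.com/share/YOUR_SHARE_ID", http.NoBody)
	if err != nil {
		fmt.Println("failed to create request:", err)
		return
	}

	resp, err := (&http.Client{}).Do(req)
	if err != nil {
		// A canceled or timed-out context surfaces here.
		fmt.Println("request failed:", err,
			"deadline exceeded:", errors.Is(err, context.DeadlineExceeded))
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode)
}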

View File

@@ -1,7 +1,7 @@
+// Package services_test provides tests for the parser service.
 package services
 import (
-	"context"
 	"encoding/json"
 	"net/http"
 	"net/http/httptest"
@@ -16,7 +16,7 @@ import (
 // TestNewArticulateParser tests the NewArticulateParser constructor.
 func TestNewArticulateParser(t *testing.T) {
-	parser := NewArticulateParser(nil, "", 0)
+	parser := NewArticulateParser()
 	if parser == nil {
 		t.Fatal("NewArticulateParser() returned nil")
@@ -112,7 +112,7 @@ func TestArticulateParser_FetchCourse(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			course, err := parser.FetchCourse(context.Background(), tt.uri)
+			course, err := parser.FetchCourse(tt.uri)
 			if tt.expectedError != "" {
 				if err == nil {
@@ -146,7 +146,7 @@ func TestArticulateParser_FetchCourse_NetworkError(t *testing.T) {
 		},
 	}
-	_, err := parser.FetchCourse(context.Background(), "https://rise.articulate.com/share/test-share-id")
+	_, err := parser.FetchCourse("https://rise.articulate.com/share/test-share-id")
 	if err == nil {
 		t.Fatal("Expected network error, got nil")
 	}
@@ -161,10 +161,7 @@ func TestArticulateParser_FetchCourse_InvalidJSON(t *testing.T) {
 	// Create test server that returns invalid JSON
 	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		w.Header().Set("Content-Type", "application/json")
-		// Write is used for its side effect; the test verifies error handling on
-		// the client side, not whether the write succeeds. Ignore the error since
-		// httptest.ResponseWriter writes are rarely problematic in test contexts.
-		_, _ = w.Write([]byte("invalid json"))
+		w.Write([]byte("invalid json"))
 	}))
 	defer server.Close()
@@ -175,7 +172,7 @@ func TestArticulateParser_FetchCourse_InvalidJSON(t *testing.T) {
 		},
 	}
-	_, err := parser.FetchCourse(context.Background(), "https://rise.articulate.com/share/test-share-id")
+	_, err := parser.FetchCourse("https://rise.articulate.com/share/test-share-id")
 	if err == nil {
 		t.Fatal("Expected JSON parsing error, got nil")
 	}
@@ -208,11 +205,11 @@ func TestArticulateParser_LoadCourseFromFile(t *testing.T) {
 		t.Fatalf("Failed to marshal test course: %v", err)
 	}
-	if err := os.WriteFile(tempFile, data, 0o644); err != nil {
+	if err := os.WriteFile(tempFile, data, 0644); err != nil {
 		t.Fatalf("Failed to write test file: %v", err)
 	}
-	parser := NewArticulateParser(nil, "", 0)
+	parser := NewArticulateParser()
 	tests := []struct {
 		name string
@@ -267,11 +264,11 @@ func TestArticulateParser_LoadCourseFromFile_InvalidJSON(t *testing.T) {
 	tempDir := t.TempDir()
 	tempFile := filepath.Join(tempDir, "invalid.json")
-	if err := os.WriteFile(tempFile, []byte("invalid json content"), 0o644); err != nil {
+	if err := os.WriteFile(tempFile, []byte("invalid json content"), 0644); err != nil {
 		t.Fatalf("Failed to write test file: %v", err)
 	}
-	parser := NewArticulateParser(nil, "", 0)
+	parser := NewArticulateParser()
 	_, err := parser.LoadCourseFromFile(tempFile)
 	if err == nil {
@@ -423,7 +420,8 @@ func BenchmarkExtractShareID(b *testing.B) {
 	parser := &ArticulateParser{}
 	uri := "https://rise.articulate.com/share/N_APNg40Vr2CSH2xNz-ZLATM5kNviDIO#/"
-	for b.Loop() {
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
 		_, _ = parser.extractShareID(uri)
 	}
 }
@@ -435,7 +433,8 @@ func BenchmarkBuildAPIURL(b *testing.B) {
 	}
 	shareID := "N_APNg40Vr2CSH2xNz-ZLATM5kNviDIO"
-	for b.Loop() {
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
 		_ = parser.buildAPIURL(shareID)
 	}
 }

View File

@@ -5,17 +5,7 @@ package version
 // Version information.
 var (
 	// Version is the current version of the application.
-	// Breaking changes from 0.4.x:
-	// - Renamed GetSupportedFormat() -> SupportedFormat()
-	// - Renamed GetSupportedFormats() -> SupportedFormats()
-	// - FetchCourse now requires context.Context parameter
-	// - NewArticulateParser now accepts logger, baseURL, timeout
-	// New features:
-	// - Structured logging with slog
-	// - Configuration via environment variables
-	// - Context-aware HTTP requests
-	// - Comprehensive benchmarks and examples.
-	Version = "1.0.0"
+	Version = "0.4.1"
 	// BuildTime is the time the binary was built.
 	BuildTime = "unknown"

28
main.go
View File

@@ -4,14 +4,12 @@
 package main
 import (
-	"context"
 	"fmt"
+	"log"
 	"os"
 	"strings"
-	"github.com/kjanat/articulate-parser/internal/config"
 	"github.com/kjanat/articulate-parser/internal/exporters"
-	"github.com/kjanat/articulate-parser/internal/interfaces"
 	"github.com/kjanat/articulate-parser/internal/services"
 	"github.com/kjanat/articulate-parser/internal/version"
 )
@@ -26,19 +24,9 @@ func main() {
 // run contains the main application logic and returns an exit code.
 // This function is testable as it doesn't call os.Exit directly.
 func run(args []string) int {
-	// Load configuration
-	cfg := config.Load()
-	// Dependency injection setup with configuration
-	var logger interfaces.Logger
-	if cfg.LogFormat == "json" {
-		logger = services.NewSlogLogger(cfg.LogLevel)
-	} else {
-		logger = services.NewTextLogger(cfg.LogLevel)
-	}
+	// Dependency injection setup
 	htmlCleaner := services.NewHTMLCleaner()
-	parser := services.NewArticulateParser(logger, cfg.BaseURL, cfg.RequestTimeout)
+	parser := services.NewArticulateParser()
 	exporterFactory := exporters.NewFactory(htmlCleaner)
 	app := services.NewApp(parser, exporterFactory)
@@ -52,13 +40,13 @@ func run(args []string) int {
 	// Check for help flag
 	if len(args) > 1 && (args[1] == "--help" || args[1] == "-h" || args[1] == "help") {
-		printUsage(args[0], app.SupportedFormats())
+		printUsage(args[0], app.GetSupportedFormats())
 		return 0
 	}
 	// Check for required command-line arguments
 	if len(args) < 4 {
-		printUsage(args[0], app.SupportedFormats())
+		printUsage(args[0], app.GetSupportedFormats())
 		return 1
 	}
@@ -70,17 +58,17 @@ func run(args []string) int {
 	// Determine if source is a URI or file path
 	if isURI(source) {
-		err = app.ProcessCourseFromURI(context.Background(), source, format, output)
+		err = app.ProcessCourseFromURI(source, format, output)
 	} else {
 		err = app.ProcessCourseFromFile(source, format, output)
 	}
 	if err != nil {
-		logger.Error("failed to process course", "error", err, "source", source)
+		log.Printf("Error processing course: %v", err)
 		return 1
 	}
-	logger.Info("successfully exported course", "output", output, "format", format)
+	fmt.Printf("Successfully exported course to %s\n", output)
 	return 0
 }

View File

@@ -1,7 +1,9 @@
+// Package main_test provides tests for the main package utility functions.
 package main
 import (
 	"bytes"
+	"fmt"
 	"io"
 	"log"
 	"os"
@@ -87,7 +89,8 @@ func TestIsURI(t *testing.T) {
 func BenchmarkIsURI(b *testing.B) {
 	testStr := "https://rise.articulate.com/share/N_APNg40Vr2CSH2xNz-ZLATM5kNviDIO#/"
-	for b.Loop() {
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
 		isURI(testStr)
 	}
 }
@@ -122,16 +125,13 @@ func TestRunWithInsufficientArgs(t *testing.T) {
 			// Run the function
 			exitCode := run(tt.args)
-			// Restore stdout. Close errors are ignored: we've already captured the
-			// output before closing, and any close error doesn't affect test validity.
-			_ = w.Close()
+			// Restore stdout
+			w.Close()
 			os.Stdout = oldStdout
-			// Read captured output. Copy errors are ignored: in this test context,
-			// reading from a pipe that was just closed is not expected to fail, and
-			// we're verifying the captured output regardless.
+			// Read captured output
 			var buf bytes.Buffer
-			_, _ = io.Copy(&buf, r)
+			io.Copy(&buf, r)
 			output := buf.String()
 			// Verify exit code
@@ -166,15 +166,13 @@ func TestRunWithHelpFlags(t *testing.T) {
 			args := []string{"articulate-parser", flag}
 			exitCode := run(args)
-			// Restore stdout. Close errors are ignored: the pipe write end is already
-			// closed before reading, and any close error doesn't affect the test.
-			_ = w.Close()
+			// Restore stdout
+			w.Close()
 			os.Stdout = oldStdout
-			// Read captured output. Copy errors are ignored: we successfully wrote
-			// the help output to the pipe and can verify it regardless of close semantics.
+			// Read captured output
 			var buf bytes.Buffer
-			_, _ = io.Copy(&buf, r)
+			io.Copy(&buf, r)
 			output := buf.String()
 			// Verify exit code is 0 (success)
@@ -217,15 +215,13 @@ func TestRunWithVersionFlags(t *testing.T) {
 			args := []string{"articulate-parser", flag}
 			exitCode := run(args)
-			// Restore stdout. Close errors are ignored: the version output has already
-			// been written and we're about to read it; close semantics don't affect correctness.
-			_ = w.Close()
+			// Restore stdout
+			w.Close()
 			os.Stdout = oldStdout
-			// Read captured output. Copy errors are ignored: the output was successfully
-			// produced and we can verify its contents regardless of any I/O edge cases.
+			// Read captured output
 			var buf bytes.Buffer
-			_, _ = io.Copy(&buf, r)
+			io.Copy(&buf, r)
 			output := buf.String()
 			// Verify exit code is 0 (success)
@@ -269,36 +265,30 @@ func TestRunWithInvalidFile(t *testing.T) {
 	args := []string{"articulate-parser", "nonexistent-file.json", "markdown", "output.md"}
 	exitCode := run(args)
-	// Restore stdout/stderr and log output. Close errors are ignored: we've already
-	// written all error messages to these pipes before closing them, and the test
-	// only cares about verifying the captured output.
-	_ = stdoutW.Close()
-	_ = stderrW.Close()
+	// Restore stdout/stderr and log output
+	stdoutW.Close()
+	stderrW.Close()
 	os.Stdout = oldStdout
 	os.Stderr = oldStderr
 	log.SetOutput(oldLogOutput)
-	// Read captured output. Copy errors are ignored: the error messages have been
-	// successfully written to the pipes, and we can verify the output content
-	// regardless of any edge cases in pipe closure or I/O completion.
+	// Read captured output
 	var stdoutBuf, stderrBuf bytes.Buffer
-	_, _ = io.Copy(&stdoutBuf, stdoutR)
-	_, _ = io.Copy(&stderrBuf, stderrR)
-	// Close read ends of pipes. Errors ignored: we've already consumed all data
-	// from these pipes, and close errors don't affect test assertions.
-	_ = stdoutR.Close()
-	_ = stderrR.Close()
+	io.Copy(&stdoutBuf, stdoutR)
+	io.Copy(&stderrBuf, stderrR)
+	stdoutR.Close()
+	stderrR.Close()
 	// Verify exit code
 	if exitCode != 1 {
 		t.Errorf("Expected exit code 1 for non-existent file, got %d", exitCode)
 	}
-	// Should have error output in structured log format
-	output := stdoutBuf.String()
-	if !strings.Contains(output, "level=ERROR") && !strings.Contains(output, "failed to process course") {
-		t.Errorf("Expected error message about processing course, got: %s", output)
+	// Should have error output
+	errorOutput := stderrBuf.String()
+	if !strings.Contains(errorOutput, "Error processing course") {
+		t.Errorf("Expected error message about processing course, got: %s", errorOutput)
 	}
 }
@@ -322,36 +312,30 @@ func TestRunWithInvalidURI(t *testing.T) {
 	args := []string{"articulate-parser", "https://example.com/invalid", "markdown", "output.md"}
 	exitCode := run(args)
-	// Restore stdout/stderr and log output. Close errors are ignored: we've already
-	// written all error messages about the invalid URI to these pipes before closing,
-	// and test correctness only depends on verifying the captured error output.
-	_ = stdoutW.Close()
-	_ = stderrW.Close()
+	// Restore stdout/stderr and log output
+	stdoutW.Close()
+	stderrW.Close()
 	os.Stdout = oldStdout
 	os.Stderr = oldStderr
 	log.SetOutput(oldLogOutput)
-	// Read captured output. Copy errors are ignored: the error messages have been
-	// successfully written and we can verify the failure output content regardless
-	// of any edge cases in pipe lifecycle or I/O synchronization.
+	// Read captured output
 	var stdoutBuf, stderrBuf bytes.Buffer
-	_, _ = io.Copy(&stdoutBuf, stdoutR)
-	_, _ = io.Copy(&stderrBuf, stderrR)
-	// Close read ends of pipes. Errors ignored: we've already consumed all data
-	// and close errors don't affect the validation of the error output.
-	_ = stdoutR.Close()
-	_ = stderrR.Close()
+	io.Copy(&stdoutBuf, stdoutR)
+	io.Copy(&stderrBuf, stderrR)
+	stdoutR.Close()
+	stderrR.Close()
 	// Should fail because the URI is invalid/unreachable
 	if exitCode != 1 {
 		t.Errorf("Expected failure (exit code 1) for invalid URI, got %d", exitCode)
 	}
-	// Should have error output in structured log format
-	output := stdoutBuf.String()
-	if !strings.Contains(output, "level=ERROR") && !strings.Contains(output, "failed to process course") {
-		t.Errorf("Expected error message about processing course, got: %s", output)
+	// Should have error output
+	errorOutput := stderrBuf.String()
+	if !strings.Contains(errorOutput, "Error processing course") {
+		t.Errorf("Expected error message about processing course, got: %s", errorOutput)
 	}
 }
@@ -381,29 +365,16 @@ func TestRunWithValidJSONFile(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Failed to create temp file: %v", err)
 	}
-	// Ensure temporary test file is cleaned up. Remove errors are ignored because
-	// the test has already used the file for its purpose, and cleanup failures don't
-	// invalidate the test results (the OS will eventually clean up temp files).
-	defer func() {
-		_ = os.Remove(tmpFile.Name())
-	}()
+	defer os.Remove(tmpFile.Name())
 	if _, err := tmpFile.WriteString(testContent); err != nil {
 		t.Fatalf("Failed to write test content: %v", err)
 	}
-	// Close the temporary file. Errors are ignored because we've already written
-	// the test content and the main test logic (loading the file) doesn't depend
-	// on the success of closing this file descriptor.
-	_ = tmpFile.Close()
+	tmpFile.Close()
 	// Test successful run with valid file
 	outputFile := "test-output.md"
-	// Ensure test output file is cleaned up. Remove errors are ignored because the
-	// test has already verified the export succeeded; cleanup failures don't affect
-	// the test assertions.
-	defer func() {
-		_ = os.Remove(outputFile)
-	}()
+	defer os.Remove(outputFile)
 	// Save original stdout
 	originalStdout := os.Stdout
@@ -416,17 +387,13 @@ func TestRunWithValidJSONFile(t *testing.T) {
 	args := []string{"articulate-parser", tmpFile.Name(), "markdown", outputFile}
 	exitCode := run(args)
-	// Close write end and restore stdout. Close errors are ignored: we've already
-	// written the success message before closing, and any close error doesn't affect
-	// the validity of the captured output or the test assertions.
-	_ = w.Close()
+	// Close write end and restore stdout
+	w.Close()
 	os.Stdout = originalStdout
-	// Read captured output. Copy errors are ignored: the success message was
-	// successfully written to the pipe, and we can verify it regardless of any
-	// edge cases in pipe closure or I/O synchronization.
+	// Read captured output
 	var buf bytes.Buffer
-	_, _ = io.Copy(&buf, r)
+	io.Copy(&buf, r)
 	output := buf.String()
 	// Verify successful execution
@@ -434,9 +401,10 @@ func TestRunWithValidJSONFile(t *testing.T) {
 		t.Errorf("Expected successful execution (exit code 0), got %d", exitCode)
 	}
-	// Verify success message in structured log format
-	if !strings.Contains(output, "level=INFO") || !strings.Contains(output, "successfully exported course") {
-		t.Errorf("Expected success message in output, got: %s", output)
+	// Verify success message
+	expectedMsg := fmt.Sprintf("Successfully exported course to %s", outputFile)
+	if !strings.Contains(output, expectedMsg) {
+		t.Errorf("Expected success message '%s' in output, got: %s", expectedMsg, output)
 	}
 	// Verify output file was created
@@ -472,24 +440,17 @@ func TestRunIntegration(t *testing.T) {
 		args := []string{"articulate-parser", "articulate-sample.json", format.format, format.output}
 		exitCode := run(args)
-		// Restore stdout. Close errors are ignored: the export success message
-		// has already been written and we're about to read it; close semantics
-		// don't affect the validity of the captured output.
-		_ = w.Close()
+		// Restore stdout
+		w.Close()
 		os.Stdout = oldStdout
-		// Read captured output. Copy errors are ignored: the output was successfully
-		// produced and we can verify its contents regardless of any I/O edge cases.
+		// Read captured output
 		var buf bytes.Buffer
-		_, _ = io.Copy(&buf, r)
+		io.Copy(&buf, r)
 		output := buf.String()
-		// Clean up test file. Remove errors are ignored because the test has
-		// already verified the export succeeded; cleanup failures don't affect
-		// the test assertions.
-		defer func() {
-			_ = os.Remove(format.output)
-		}()
+		// Clean up test file
+		defer os.Remove(format.output)
 		// Verify successful execution
 		if exitCode != 0 {