fix sorting in changelog-archive.yml

This commit is contained in:
CanbiZ (MickLesk)
2026-02-04 13:59:57 +01:00
parent c599fd7551
commit 5210d1bb71

View File

@@ -54,8 +54,9 @@ jobs:
   console.log(`Cutoff date: ${cutoffDate.toISOString().split('T')[0]}`);

-  // Read changelog
-  const content = await fs.readFile(CHANGELOG_PATH, 'utf-8');
+  // Read changelog and normalize line endings
+  let content = await fs.readFile(CHANGELOG_PATH, 'utf-8');
+  content = content.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
   const lines = content.split('\n');

   // Parse entries
@@ -148,30 +149,55 @@ jobs:
   let existingContent = '';
   try {
     existingContent = await fs.readFile(monthPath, 'utf-8');
+    // Normalize line endings to prevent regex issues
+    existingContent = existingContent.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
   } catch (e) {
     // File doesn't exist
   }

-  // Merge new entries with existing (avoid duplicates)
-  const existingDates = new Set();
-  const existingDatePattern = /^## (\d{4}-\d{2}-\d{2})$/gm;
-  let match;
-  while ((match = existingDatePattern.exec(existingContent)) !== null) {
-    existingDates.add(match[1]);
+  // Parse existing entries into a Map (date -> content) for deduplication
+  const allEntries = new Map();
+
+  // Helper function to parse entries from content
+  const parseEntries = (content) => {
+    const entries = new Map();
+    const parts = content.split(/(?=^## \d{4}-\d{2}-\d{2}$)/m);
+    for (const part of parts) {
+      const trimmed = part.trim();
+      if (!trimmed) continue;
+      const dateMatch = trimmed.match(/^## (\d{4}-\d{2}-\d{2})/);
+      if (dateMatch) {
+        entries.set(dateMatch[1], trimmed);
+      }
+    }
+    return entries;
+  };
+
+  // Parse existing content
+  if (existingContent) {
+    const existingEntries = parseEntries(existingContent);
+    for (const [date, content] of existingEntries) {
+      allEntries.set(date, content);
+    }
   }

-  const newEntries = archiveData[year][month].filter(entry => {
+  // Add new entries (existing entries take precedence to avoid overwriting)
+  let addedCount = 0;
+  for (const entry of archiveData[year][month]) {
     const dateMatch = entry.match(/^## (\d{4}-\d{2}-\d{2})/);
-    return dateMatch && !existingDates.has(dateMatch[1]);
-  });
+    if (dateMatch && !allEntries.has(dateMatch[1])) {
+      allEntries.set(dateMatch[1], entry.trim());
+      addedCount++;
+    }
+  }

-  if (newEntries.length > 0) {
-    const allContent = existingContent
-      ? existingContent + '\n\n' + newEntries.join('\n\n')
-      : newEntries.join('\n\n');
+  // Sort entries by date (newest first) and write
+  const sortedDates = [...allEntries.keys()].sort().reverse();
+  const sortedContent = sortedDates.map(date => allEntries.get(date)).join('\n\n');

-    await fs.writeFile(monthPath, allContent, 'utf-8');
-    console.log(`Updated: ${monthPath} (+${newEntries.length} entries)`);
+  if (addedCount > 0 || !existingContent) {
+    await fs.writeFile(monthPath, sortedContent + '\n', 'utf-8');
+    console.log(`Updated: ${monthPath} (${allEntries.size} total entries, +${addedCount} new)`);
   }
 }
@@ -218,7 +244,8 @@ jobs:
   // Count entries in month file
   let entryCount = 0;
   try {
-    const monthContent = await fs.readFile(monthPath, 'utf-8');
+    let monthContent = await fs.readFile(monthPath, 'utf-8');
+    monthContent = monthContent.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
     entryCount = (monthContent.match(/^## \d{4}-\d{2}-\d{2}$/gm) || []).length;
   } catch (e) {
     entryCount = (archiveData[year]?.[month] || []).length;