Module: blog¶
This module reads all the JSON files from a prior fetch, as well as the alt-text files for the word cloud and the graph, then formats them into a Markdown file suitable for running through Hugo. The result is then manually copied to a Hugo web site.
This can only succeed if:

1. fetch has run successfully
2. analyse has run successfully
3. graphs has run successfully to generate the graphs and their alt text
Because writing the fetch statistics to a JSON file was added later, most of my data doesn't have that information; I have only tracked fetch statistics since October 2025. If the fetch data doesn't exist, the blog module complains but continues.
An example site is the Monsterdon Archives. You can see the Hugo source code in the gallery repository.
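To run this step by hand, build a ConfigParser with the keys the module reads and call blog(). Here is a minimal sketch; the section and key names match the source below, but the values and the idea of constructing the config manually are purely illustrative, since a real run reuses the config from the same pipeline run that produced the fetch/analyse/graphs output:

```python
from configparser import ConfigParser

from mastoscore.blog import blog

# Illustrative values only; a real run reuses the config from the
# fetch/analyse/graphs pipeline, which also writes the JSON dumps and
# alt-text files that blog() reads.
config = ConfigParser()
config["mastoscore"] = {
    "hashtag": "monsterdon",
    "event_year": "2025",
    "event_month": "10",
    "event_day": "05",
    "episode_title": "The Swamp Thing (1982)",
}

if not blog(config):
    print("No blog written; check that fetch, analyse, and graphs ran first.")
```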
Code Reference¶
Module for writing out a Hugo-compatible blog post in Markdown. Reads two JSON dumps, one written by fetch and one written by analyse, then writes out the blog post. Since the fetch JSON dump was only introduced in October 2025, the code handles the possibility of it not existing.
blog(config)
¶
Do what it takes to find the results, convert them to Markdown, then write it out in Hugo-compatible Markdown. Hugo compatibility means putting some extra TOML variables at the top of the file. It's not just raw Markdown.
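For example, the TOML front matter that ends up at the top of index.md looks roughly like this (illustrative values; the exact template is in create_blog_text below):

```toml
+++
title = """The Swamp Thing (1982)"""
date = 2025-10-05
draft = false
image = "2025/10/05/monsterdon/wordcloud-monsterdon-20251005-remove.png"
description = """monsterdon graphs for The Swamp Thing (1982)"""
tags = ["monsterdon", "1982"]
archives = ["2025/10"]
author = "Paco Hope"
+++
```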
Source code in mastoscore/blog.py
def blog(config: ConfigParser) -> bool:
    """Do what it takes to find the results, convert them to markdown, then
    write it out in Hugo-compatible Markdown. Hugo compatibility means
    putting some extra TOML variables at the top. It's not just raw markdown.
    """
    logger = logging.getLogger(__name__)
    analysis = read_json(config, "analysis")
    if not analysis:
        logger.error("Problem with analysis file? No blog written.")
        return False
    # Create directory structure
    dir_path = create_journal_directory(config)
    if not dir_path:
        return False
    blog_text = create_blog_text(config, analysis)
    # Blog file is index.md
    blog_filename = join(dir_path, "index.md")
    try:
        with open(blog_filename, "w") as bfile:
            print(blog_text, file=bfile)
    except Exception as e:
        # IOError is an alias of OSError in Python 3, so one handler covers
        # file-system errors and anything else unexpected.
        logger.critical(f"Failed to write blog text to {blog_filename}")
        logger.critical(e)
        return False
    return True
create_blog_text(config, analysis)
¶
Write out the Markdown for the blog post with all the analysis.
Source code in mastoscore/blog.py
def create_blog_text(config: ConfigParser, analysis: dict) -> str:
    """Write out the Markdown for the blog post with all the analysis."""
    # Set a locale so that things like currency and numbers come out as
    # expected.
    setlocale(LC_ALL, "en_US.UTF-8")
    logger = logging.getLogger(__name__)
    event_title = None
    year = config.get("mastoscore", "event_year")
    month = config.get("mastoscore", "event_month")
    day = config.get("mastoscore", "event_day")
    hashtag = config.get("mastoscore", "hashtag")
    # older data files had event_title, but not episode_title
    try:
        event_title = config.get("mastoscore", "episode_title")
    except Exception:
        logger.debug("episode_title not found. Trying event_title")
    if not event_title:
        try:
            event_title = config.get("mastoscore", "event_title")
        except Exception:
            logger.error("neither episode_title nor event_title found")
            event_title = f"{hashtag} {year}-{month}-{day}"
    # Find our journal directory so we can see if there are text files
    # accompanying the images.
    journaldir = create_journal_directory(config)
    if not journaldir:
        return ""
    text_file_name = join(journaldir, f"{hashtag}-{year}{month}{day}.txt")
    graph_alt_text = ""
    with open(text_file_name, "r", encoding="utf-8") as tfile:
        graph_alt_text = tfile.read()
    text_file_name = join(
        journaldir, f"wordcloud-{hashtag}-{year}{month}{day}-remove.txt"
    )
    wordcloud_alt_text = ""
    with open(text_file_name, "r", encoding="utf-8") as tfile:
        wordcloud_alt_text = tfile.read()
    # These bits of Hugo markup are a pain to type, so assign them to short
    # variables to make them easier to work with.
    a = "{{< hilite >}}"
    b = "{{</ hilite >}}"
    # write out top boosts
    top = analysis["max_boosts"]
    text = "\n\n"
    text = text + f"### The top {analysis['top_n']} boosted toots:\n"
    i = 1
    for item in top:
        text = (
            text
            + f"""
{i}. [This toot]({item["url"]}) from [{item["account.display_name"]}]({item["account.url"]})
({item["userid"]}) had {a}{item["reblogs_count"]}{b} boosts.
> {item["content"]}
"""
        )
        i += 1
    boost_text = text
    # write out top favourites
    top = analysis["max_faves"]
    text = "\n\n"
    text = text + f"### The top {analysis['top_n']} favourited toots:\n"
    i = 1
    for item in top:
        text = (
            text
            + f"""
{i}. [This toot]({item["url"]}) from [{item["account.display_name"]}]({item["account.url"]})
({item["userid"]}) had {a}{item["favourites_count"]}{b} favourites.
> {item["content"]}
"""
        )
        i += 1
    text = text + "\n"
    faves_text = text
    # write out top replies
    top = analysis["max_replies"]
    text = "\n\n"
    text = (
        text
        + f"""### The top {analysis["top_n"]} most-replied-to toots:
(Note: this really should exclude people who reply to themselves; it doesn't do that well yet.)
"""
    )
    i = 1
    for item in top:
        text = (
            text
            + f"""
{i}. [This toot]({item["url"]}) from [{item["account.display_name"]}]({item["account.url"]})
({item["userid"]}) had {a}{item["replies_count"]}{b} replies.
> {item["content"]}
"""
        )
        i += 1
    replies_text = text
    try:
        fetch_info = read_json(config, "fetch")
        total_servers = len(fetch_info["servers_done"]) + len(
            fetch_info["servers_fail"]
        )
        done_servers = len(fetch_info["servers_done"])
        fail_servers = len(fetch_info["servers_fail"])
        done_pct = 100 * done_servers / total_servers
        fail_pct = 100 * fail_servers / total_servers
        intro = "We"
        if 'fetch_duration' in fetch_info:
            intro = f"{intro} spent {a}{fetch_info['fetch_duration']}{b} downloading"
        else:
            intro = f"{intro} downloaded"
        fetch_text = f"""
{intro} {a}{fetch_info["total_toots"]:n}{b} toots at {a}{fetch_info["fetch_time"]}{b} \
from a total of {done_servers:n} servers. We ended up with a set of \
{a}{analysis["gross_toots"]:n}{b} unique toots posted between \
{a}{analysis["event_start"]}{b} and {a}{analysis["event_end"]}{b} by \
{a}{analysis["unique_ids"]:n}{b} distinct fediverse accounts.
We tried to connect to {total_servers:n} different servers, of which {done_servers} \
({done_pct:0.1f}%) succeeded and {fail_servers} ({fail_pct:0.1f}%) failed.
"""
    except Exception:
        # The fetch statistics may be missing (they have only been recorded
        # since October 2025), so fall back to a summary built from the
        # analysis data alone.
        fetch_text = f"""
We found {a}{analysis["gross_toots"]:n}{b} unique toots posted between \
{a}{analysis["event_start"]}{b} and {a}{analysis["event_end"]}{b} by \
{a}{analysis["unique_ids"]:n}{b} distinct fediverse accounts across \
{analysis["num_servers"]} unique servers.
"""
    summary_text = f"""
## Summary
{{{{< figure \
src="thumb.jpg" \
alt="(Movie poster placeholder)" \
>}}}}
{fetch_text}
The server {a}[{analysis["max_server"]["name"]}](https://{analysis["max_server"]["name"]}/){b} \
contributed the most toots with {a}{analysis["max_server"]["num"]:n}{b}.
'{analysis["most_posts"]["name"]}' ({analysis["most_posts"]["id"]}) tooted the most with \
{a}{analysis["most_posts"]["count"]:n}{b} toots.
"""
    # Try to pluck the year of the film from the title. Film titles are
    # usually something like "The Swamp Thing (1982)".
    match = search(r"\((\d{4})\)", event_title)
    blog_tags = f'["{hashtag}"'
    if match:
        film_year = match.group(1)
        blog_tags = blog_tags + f', "{film_year}"]'
    else:
        blog_tags = blog_tags + "]"
    graph_text = f"""
## Activity Graph
{{{{< figure \
src="{hashtag}-{year}{month}{day}.png" \
alt="Activity graph. See main page for actual description." \
>}}}}
{graph_alt_text}
"""
    wordcloud_text = f"""
## Wordcloud
{{{{< figure \
src="wordcloud-{hashtag}-{year}{month}{day}-remove.png" \
alt="Word cloud. See main page for actual description." \
>}}}}
{wordcloud_alt_text}
"""
    # Pull it all together, with the Hugo front matter at the top.
    # TOML needs triple double quotes for multi-line strings, so the Python
    # f-string has to use triple single quotes here.
    wordcloud_file = f"{year}/{month}/{day}/{hashtag}/wordcloud-{hashtag}-{year}{month}{day}-remove.png"
    blog_text = f'''
+++
title = """{event_title}"""
date = {year}-{month}-{day}
draft = false
image = "{wordcloud_file}"
description = """{hashtag} graphs for {event_title}"""
tags = {blog_tags}
archives = ["{year}/{month}"]
author = "Paco Hope"
+++
{summary_text}
{boost_text}
{faves_text}
{replies_text}
{graph_text}
{wordcloud_text}
Analysis of #{hashtag} generated at {analysis["generated"]}
'''
    # print out the shell commands that will copy the data to the blog
    print(f"""
mkdir -p ../{hashtag}-gallery/content/{year}/{month}/{day}/{hashtag}/
cp $(ls data/{year}/{month}/{day}/* | grep -v json) ../{hashtag}-gallery/content/{year}/{month}/{day}/{hashtag}
""")
    return blog_text
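To exercise create_blog_text on its own, you need an analysis dict with the keys the function reads, plus the alt-text files already present in the journal directory. Here is a minimal sketch with invented values; the real dict is the JSON dump written by analyse:

```python
# Every key below is one that create_blog_text() actually reads; all the
# values are invented for illustration.
toot = {
    "url": "https://example.social/@someone/123",
    "account.display_name": "Someone",
    "account.url": "https://example.social/@someone",
    "userid": "someone@example.social",
    "content": "What a movie!",
    "reblogs_count": 42,
    "favourites_count": 99,
    "replies_count": 7,
}
analysis = {
    "top_n": 1,
    "max_boosts": [toot],
    "max_faves": [toot],
    "max_replies": [toot],
    "gross_toots": 1234,
    "event_start": "2025-10-05 01:00",
    "event_end": "2025-10-05 03:00",
    "unique_ids": 321,
    "num_servers": 55,
    "max_server": {"name": "example.social", "num": 456},
    "most_posts": {"name": "Someone", "id": "someone@example.social", "count": 33},
    "generated": "2025-10-05 12:00",
}

markdown = create_blog_text(config, analysis)  # config as in the example above
```

If the fetch JSON is missing, the try/except above means the summary is built from the analysis dict alone, which is why num_servers must be present even in a minimal test fixture.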