mirror of https://gitlab.com/bashrc2/epicyon
Snake case
parent
9793b725e1
commit
43379ae568
|
@ -21,175 +21,175 @@ from webapp_utils import html_footer
|
|||
|
||||
|
||||
def get_hashtag_categories_feed(base_dir: str,
                                hashtag_categories: {} = None) -> str:
    """Returns an RSS 2.0 feed string for hashtag categories.

    base_dir - base directory of the instance, used to load the
    categories when none are supplied
    hashtag_categories - optional dict of category name -> list of
    hashtags; if None (or empty) it is loaded via
    get_hashtag_categories(base_dir)
    Returns the feed as a string, or None if no categories exist.
    """
    if not hashtag_categories:
        hashtag_categories = get_hashtag_categories(base_dir)
        if not hashtag_categories:
            return None

    rss_str = \
        "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n" + \
        "<rss version=\"2.0\">\n" + \
        '<channel>\n' + \
        '    <title>#categories</title>\n'

    rss_date_str = \
        datetime.utcnow().strftime("%a, %d %b %Y %H:%M:%S UT")

    for category_str, hashtag_list in hashtag_categories.items():
        rss_str += \
            '<item>\n' + \
            '  <title>' + category_str + '</title>\n'
        # build the space-separated list of tags for this category,
        # skipping any tag containing ':' or '&' (unsafe in the feed)
        list_str = ''
        for hashtag in hashtag_list:
            if ':' in hashtag:
                continue
            if '&' in hashtag:
                continue
            list_str += hashtag + ' '
        rss_str += \
            '  <description>' + list_str.strip() + '</description>\n' + \
            '  <link/>\n' + \
            '  <pubDate>' + rss_date_str + '</pubDate>\n' + \
            '</item>\n'

    rss_str += \
        '</channel>\n' + \
        '</rss>\n'
    return rss_str
|
||||
|
||||
|
||||
def html_hash_tag_swarm(base_dir: str, actor: str, translate: {}) -> str:
    """Returns a tag swarm of today's hashtags as HTML links.

    base_dir - base directory; tag files are read from base_dir + '/tags'
    actor - actor url used as the prefix for the /tags/ and /category/ links
    translate - translation dict (used for the content warning button label)
    Returns the swarm HTML string, or '' when no recent tags exist.
    """
    max_tag_length = 42
    curr_time = datetime.utcnow()
    days_since_epoch = (curr_time - datetime(1970, 1, 1)).days
    days_since_epoch_str = str(days_since_epoch) + ' '
    days_since_epoch_str2 = str(days_since_epoch - 1) + ' '
    recently = days_since_epoch - 1
    tag_swarm = []
    category_swarm = []
    domain_histogram = {}

    # Load the blocked hashtags into memory.
    # This avoids needing to repeatedly load the blocked file for each hashtag
    blocked_str = ''
    global_blocking_filename = base_dir + '/accounts/blocking.txt'
    if os.path.isfile(global_blocking_filename):
        with open(global_blocking_filename, 'r') as fp_block:
            blocked_str = fp_block.read()

    for _, _, files in os.walk(base_dir + '/tags'):
        for fname in files:
            if not fname.endswith('.txt'):
                continue
            tags_filename = os.path.join(base_dir + '/tags', fname)
            if not os.path.isfile(tags_filename):
                continue

            # get last modified datetime
            mod_time_since_epoc = os.path.getmtime(tags_filename)
            last_modified_date = datetime.fromtimestamp(mod_time_since_epoc)
            file_days_since_epoch = \
                (last_modified_date - datetime(1970, 1, 1)).days

            # check if the file was last modified within the previous
            # two days
            if file_days_since_epoch < recently:
                continue

            hash_tag_name = fname.split('.')[0]
            if len(hash_tag_name) > max_tag_length:
                # NoIncrediblyLongAndBoringHashtagsShownHere
                continue
            # skip tags containing characters unsafe for html/feeds
            if '#' in hash_tag_name or \
               '&' in hash_tag_name or \
               '"' in hash_tag_name or \
               "'" in hash_tag_name:
                continue
            if '#' + hash_tag_name + '\n' in blocked_str:
                continue
            with open(tags_filename, 'r') as fp_tags:
                # only read one line, which saves time and memory
                last_tag = fp_tags.readline()
                if not last_tag.startswith(days_since_epoch_str):
                    if not last_tag.startswith(days_since_epoch_str2):
                        continue
            with open(tags_filename, 'r') as fp_tags:
                while True:
                    line = fp_tags.readline()
                    if not line:
                        break
                    if ' ' not in line:
                        break
                    # each line looks like:
                    # "<days since epoch> <something> <url##domain...>"
                    sections = line.split(' ')
                    if len(sections) != 3:
                        break
                    post_days_since_epoch_str = sections[0]
                    if not post_days_since_epoch_str.isdigit():
                        break
                    post_days_since_epoch = int(post_days_since_epoch_str)
                    if post_days_since_epoch < recently:
                        break
                    else:
                        post_url = sections[2]
                        if '##' not in post_url:
                            break
                        post_domain = post_url.split('##')[1]
                        if '#' in post_domain:
                            post_domain = post_domain.split('#')[0]

                        if domain_histogram.get(post_domain):
                            domain_histogram[post_domain] = \
                                domain_histogram[post_domain] + 1
                        else:
                            domain_histogram[post_domain] = 1
                        tag_swarm.append(hash_tag_name)
                        category_filename = \
                            tags_filename.replace('.txt', '.category')
                        if os.path.isfile(category_filename):
                            category_str = \
                                get_hashtag_category(base_dir, hash_tag_name)
                            if len(category_str) < max_tag_length:
                                if '#' not in category_str and \
                                   '&' not in category_str and \
                                   '"' not in category_str and \
                                   "'" not in category_str:
                                    if category_str not in category_swarm:
                                        category_swarm.append(category_str)
                        break
                    # NOTE(review): unreachable — both branches above break;
                    # kept for fidelity with the original loop structure
                    break

    if not tag_swarm:
        return ''
    tag_swarm.sort()

    # swarm of categories
    # NOTE(review): indentation reconstructed — categories appear to be
    # rendered only when more than three exist; confirm against upstream
    category_swarm_str = ''
    if category_swarm:
        if len(category_swarm) > 3:
            category_swarm.sort()
            for category_str in category_swarm:
                category_swarm_str += \
                    '<a href="' + actor + '/category/' + category_str + \
                    '" class="hashtagswarm"><b>' + category_str + '</b></a>\n'
            category_swarm_str += '<br>\n'

    # swarm of tags
    tag_swarm_str = ''
    for tag_name in tag_swarm:
        tag_swarm_str += \
            '<a href="' + actor + '/tags/' + tag_name + \
            '" class="hashtagswarm">' + tag_name + '</a>\n'

    # when categories are shown, hide the full tag list behind a
    # content warning button
    if category_swarm_str:
        tag_swarm_str = \
            get_content_warning_button('alltags', translate, tag_swarm_str)

    tag_swarm_html = category_swarm_str + tag_swarm_str.strip() + '\n'
    return tag_swarm_html
|
||||
|
||||
|
||||
def html_search_hashtag_category(css_cache: {}, translate: {},
|
||||
|
@ -198,8 +198,8 @@ def html_search_hashtag_category(css_cache: {}, translate: {},
|
|||
"""Show hashtags after selecting a category on the main search screen
|
||||
"""
|
||||
actor = path.split('/category/')[0]
|
||||
categoryStr = path.split('/category/')[1].strip()
|
||||
searchNickname = get_nickname_from_actor(actor)
|
||||
category_str = path.split('/category/')[1].strip()
|
||||
search_nickname = get_nickname_from_actor(actor)
|
||||
|
||||
set_custom_background(base_dir, 'search-background', 'follow-background')
|
||||
|
||||
|
@ -207,37 +207,37 @@ def html_search_hashtag_category(css_cache: {}, translate: {},
|
|||
if os.path.isfile(base_dir + '/search.css'):
|
||||
css_filename = base_dir + '/search.css'
|
||||
|
||||
instanceTitle = \
|
||||
instance_title = \
|
||||
get_config_param(base_dir, 'instanceTitle')
|
||||
htmlStr = \
|
||||
html_header_with_external_style(css_filename, instanceTitle, None)
|
||||
html_str = \
|
||||
html_header_with_external_style(css_filename, instance_title, None)
|
||||
|
||||
# show a banner above the search box
|
||||
searchBannerFile, searchBannerFilename = \
|
||||
get_search_banner_file(base_dir, searchNickname, domain, theme)
|
||||
search_banner_file, search_banner_filename = \
|
||||
get_search_banner_file(base_dir, search_nickname, domain, theme)
|
||||
|
||||
if os.path.isfile(searchBannerFilename):
|
||||
htmlStr += '<a href="' + actor + '/search">\n'
|
||||
htmlStr += '<img loading="lazy" class="timeline-banner" src="' + \
|
||||
actor + '/' + searchBannerFile + '" alt="" /></a>\n'
|
||||
if os.path.isfile(search_banner_filename):
|
||||
html_str += '<a href="' + actor + '/search">\n'
|
||||
html_str += '<img loading="lazy" class="timeline-banner" src="' + \
|
||||
actor + '/' + search_banner_file + '" alt="" /></a>\n'
|
||||
|
||||
htmlStr += \
|
||||
html_str += \
|
||||
'<div class="follow">' + \
|
||||
'<center><br><br><br>' + \
|
||||
'<h1><a href="' + actor + '/search"><b>' + \
|
||||
translate['Category'] + ': ' + categoryStr + '</b></a></h1>'
|
||||
translate['Category'] + ': ' + category_str + '</b></a></h1>'
|
||||
|
||||
hashtagsDict = get_hashtag_categories(base_dir, True, categoryStr)
|
||||
if hashtagsDict:
|
||||
for categoryStr2, hashtagList in hashtagsDict.items():
|
||||
hashtagList.sort()
|
||||
for tagName in hashtagList:
|
||||
htmlStr += \
|
||||
'<a href="' + actor + '/tags/' + tagName + \
|
||||
'" class="hashtagswarm">' + tagName + '</a>\n'
|
||||
hashtags_dict = get_hashtag_categories(base_dir, True, category_str)
|
||||
if hashtags_dict:
|
||||
for _, hashtag_list in hashtags_dict.items():
|
||||
hashtag_list.sort()
|
||||
for tag_name in hashtag_list:
|
||||
html_str += \
|
||||
'<a href="' + actor + '/tags/' + tag_name + \
|
||||
'" class="hashtagswarm">' + tag_name + '</a>\n'
|
||||
|
||||
htmlStr += \
|
||||
html_str += \
|
||||
'</center>' + \
|
||||
'</div>'
|
||||
htmlStr += html_footer()
|
||||
return htmlStr
|
||||
html_str += html_footer()
|
||||
return html_str
|
||||
|
|
Loading…
Reference in New Issue