Add list types

merge-requests/30/head
Bob Mottram 2024-12-23 15:39:55 +00:00
parent 862d85fa29
commit fff16d723d
16 changed files with 58 additions and 58 deletions
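
Every hunk below makes the same kind of change: a bare empty-list initialisation gains a PEP 526 variable annotation using PEP 585 built-in generics (list[str], list[dict]), so a type checker no longer has to guess the element type of []. A minimal sketch of the before/after pattern (illustrative names, not taken from the Epicyon sources):

    # Before: an empty list literal carries no element type, so
    # mypy in strict mode typically reports
    # 'Need type annotation for "pending"'.
    pending = []

    # After: a PEP 526 annotation with a PEP 585 subscripted
    # built-in (Python 3.9+) declares the element type up front.
    pending: list[str] = []
    pending.append('alice@example.net')  # str items type-check cleanly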


@@ -51,7 +51,7 @@ def no_of_announces(post_json_object: {}) -> int:
     if not isinstance(obj['shares'], dict):
         return 0
     if not obj['shares'].get('items'):
-        obj['shares']['items'] = []
+        obj['shares']['items']: list[dict] = []
         obj['shares']['totalItems'] = 0
     return len(obj['shares']['items'])
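
A detail worth noting in hunks like the one above: the annotated target is a subscription (obj['shares']['items']) rather than a simple name. PEP 526 permits this, but the annotation is evaluated and then discarded rather than recorded in __annotations__, so it serves readers and static checkers only. A hypothetical illustration, not code from the Epicyon repository:

    shares: dict = {}

    # Legal per PEP 526: an annotated assignment may target a
    # subscription. The annotation is evaluated but not stored
    # in any __annotations__ mapping.
    shares['items']: list[dict] = []

    print(__annotations__)  # {'shares': <class 'dict'>} - no entry for shares['items']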

art.py

@@ -81,7 +81,7 @@ def set_art_site_url(actor_json: {}, art_site_url: str) -> None:
         return

     if not actor_json.get('attachment'):
-        actor_json['attachment'] = []
+        actor_json['attachment']: list[dict] = []
     # remove any existing value
     property_found = None


@@ -123,7 +123,7 @@ def blocked_timeline_json(actor: str, page_number: int, items_per_page: int,
     """
     blocked_accounts_textarea = \
         get_account_blocks(base_dir, nickname, domain)
-    blocked_list = []
+    blocked_list: list[str] = []
     if blocked_accounts_textarea:
         blocked_list = blocked_accounts_textarea.split('\n')
     start_index = (page_number - 1) * items_per_page
@@ -846,7 +846,7 @@ def is_blocked(base_dir: str, nickname: str, domain: str,
     federated_blocks_filename = \
         data_dir(base_dir) + '/block_api.txt'
     if os.path.isfile(federated_blocks_filename):
-        block_federated = []
+        block_federated: list[str] = []
         try:
             with open(federated_blocks_filename, 'r',
                       encoding='utf-8') as fp_fed:
@@ -1268,7 +1268,7 @@ def mute_post(base_dir: str, nickname: str, domain: str, port: int,
         post_json_obj['ignores'] = ignores_json
     else:
         if not post_json_obj['ignores'].get('items'):
-            post_json_obj['ignores']['items'] = []
+            post_json_obj['ignores']['items']: list[dict] = []
         items_list = post_json_obj['ignores']['items']
         for ignores_item in items_list:
             if ignores_item.get('actor'):
@@ -1737,7 +1737,7 @@ def import_blocking_file(base_dir: str, nickname: str, domain: str,
     blocking_reasons_filename = \
         account_directory + '/blocking_reasons.txt'
-    existing_lines = []
+    existing_lines: list[str] = []
     if os.path.isfile(blocking_filename):
         try:
             with open(blocking_filename, 'r', encoding='utf-8') as fp_blocks:
@@ -1746,7 +1746,7 @@ def import_blocking_file(base_dir: str, nickname: str, domain: str,
             print('EX: ' +
                   'unable to import existing blocked instances from file ' +
                   blocking_filename)
-    existing_reasons = []
+    existing_reasons: list[str] = []
     if os.path.isfile(blocking_reasons_filename):
         try:
             with open(blocking_reasons_filename,
@@ -1758,8 +1758,8 @@ def import_blocking_file(base_dir: str, nickname: str, domain: str,
                   'blocked instance reasons from file ' +
                   blocking_reasons_filename)
-    append_blocks = []
-    append_reasons = []
+    append_blocks: list[str] = []
+    append_reasons: list[str] = []
     for line_str in lines:
         if line_str.startswith('#'):
             continue
@@ -1829,7 +1829,7 @@ def export_blocking_file(base_dir: str, nickname: str, domain: str) -> str:
     if not os.path.isfile(blocking_filename):
         return blocks_header
-    blocking_lines = []
+    blocking_lines: list[str] = []
     if os.path.isfile(blocking_filename):
         try:
             with open(blocking_filename, 'r', encoding='utf-8') as fp_block:
@@ -1837,7 +1837,7 @@ def export_blocking_file(base_dir: str, nickname: str, domain: str) -> str:
     except OSError:
         print('EX: export_blocks failed to read ' + blocking_filename)
-    blocking_reasons = []
+    blocking_reasons: list[str] = []
    if os.path.isfile(blocking_reasons_filename):
        try:
            with open(blocking_reasons_filename, 'r',
@@ -1906,7 +1906,7 @@ def load_blocked_military(base_dir: str) -> {}:
     """Loads a list of nicknames for accounts which block military instances
     """
     block_military_filename = data_dir(base_dir) + '/block_military.txt'
-    nicknames_list = []
+    nicknames_list: list[str] = []
     if os.path.isfile(block_military_filename):
         try:
             with open(block_military_filename, 'r',
@@ -1927,7 +1927,7 @@ def load_blocked_government(base_dir: str) -> {}:
     """Loads a list of nicknames for accounts which block government instances
     """
     block_government_filename = data_dir(base_dir) + '/block_government.txt'
-    nicknames_list = []
+    nicknames_list: list[str] = []
     if os.path.isfile(block_government_filename):
         try:
             with open(block_government_filename, 'r',
@@ -1948,7 +1948,7 @@ def load_blocked_bluesky(base_dir: str) -> {}:
     """Loads a list of nicknames for accounts which block bluesky bridges
     """
     block_bluesky_filename = data_dir(base_dir) + '/block_bluesky.txt'
-    nicknames_list = []
+    nicknames_list: list[str] = []
     if os.path.isfile(block_bluesky_filename):
         try:
             with open(block_bluesky_filename, 'r',
@@ -2076,11 +2076,11 @@ def contains_bluesky_domain(message_str: str) -> bool:
 def load_federated_blocks_endpoints(base_dir: str) -> []:
     """Loads endpoint urls for federated blocklists
     """
-    block_federated_endpoints = []
+    block_federated_endpoints: list[str] = []
     block_api_endpoints_filename = \
         data_dir(base_dir) + '/block_api_endpoints.txt'
     if os.path.isfile(block_api_endpoints_filename):
-        new_block_federated_endpoints = []
+        new_block_federated_endpoints: list[str] = []
         try:
             with open(block_api_endpoints_filename, 'r',
                       encoding='utf-8') as fp_ep:
@@ -2120,7 +2120,7 @@ def _update_federated_blocks(session, base_dir: str,
                              mitm_servers: []) -> []:
     """Creates block_api.txt
     """
-    block_federated = []
+    block_federated: list[str] = []
     debug = True
     if not session:
@@ -2217,7 +2217,7 @@ def save_block_federated_endpoints(base_dir: str,
     """
     block_api_endpoints_filename = \
         data_dir(base_dir) + '/block_api_endpoints.txt'
-    result = []
+    result: list[str] = []
     block_federated_endpoints_str = ''
     for endpoint in block_federated_endpoints:
         if not endpoint:


@@ -77,9 +77,9 @@ def _no_of_blog_replies(base_dir: str, http_prefix: str, translate: {},
             return 1
         return 0
-    removals = []
+    removals: list[str] = []
     replies = 0
-    lines = []
+    lines: list[str] = []
     try:
         with open(post_filename, 'r', encoding='utf-8') as fp_post:
             lines = fp_post.readlines()
@@ -154,7 +154,7 @@ def _get_blog_replies(base_dir: str, http_prefix: str, translate: {},
         print('EX: unable to read blog 3 ' + post_filename)
         return ''
-    lines = []
+    lines: list[str] = []
     try:
         with open(post_filename, 'r', encoding='utf-8') as fp_post:
             lines = fp_post.readlines()
@@ -275,7 +275,7 @@ def _html_blog_post_content(debug: bool, session, authorized: bool,
     person_url = local_actor_url(http_prefix, nickname, domain_full)
     actor_json = \
         get_person_from_cache(base_dir, person_url, person_cache)
-    languages_understood = []
+    languages_understood: list[str] = []
     if actor_json:
         languages_understood = get_actor_languages_list(actor_json)
     json_content = get_content_from_post(post_json_object, system_language,


@@ -159,7 +159,7 @@ def _no_of_bookmarks(post_json_object: {}) -> int:
     if not isinstance(post_json_object['object']['bookmarks'], dict):
         return 0
     if not post_json_object['object']['bookmarks'].get('items'):
-        post_json_object['object']['bookmarks']['items'] = []
+        post_json_object['object']['bookmarks']['items']: list[dict] = []
         post_json_object['object']['bookmarks']['totalItems'] = 0
     return len(post_json_object['object']['bookmarks']['items'])
@@ -220,7 +220,7 @@ def update_bookmarks_collection(recent_posts_cache: {},
         post_json_object['object']['bookmarks'] = bookmarks_json
     else:
         if not post_json_object['object']['bookmarks'].get('items'):
-            post_json_object['object']['bookmarks']['items'] = []
+            post_json_object['object']['bookmarks']['items']: list[dict] = []
         bm_items = post_json_object['object']['bookmarks']['items']
         for bookmark_item in bm_items:
             if bookmark_item.get('actor'):


@@ -80,7 +80,7 @@ def set_briar_address(actor_json: {}, briar_address: str) -> None:
         not_briar_address = True

     if not actor_json.get('attachment'):
-        actor_json['attachment'] = []
+        actor_json['attachment']: list[dict] = []
     # remove any existing value
     property_found = None


@@ -147,7 +147,7 @@ def expire_person_cache(person_cache: {}):
     """Expires old entries from the cache in memory
     """
     curr_time = date_utcnow()
-    removals = []
+    removals: list[str] = []
     for person_url, cache_json in person_cache.items():
         cache_time = date_from_string_format(cache_json['timestamp'],
                                              ["%Y-%m-%dT%H:%M:%S%z"])


@@ -67,7 +67,7 @@ def load_city_hashtags(base_dir: str, translate: {}) -> None:
         cities_filename = base_dir + '/data/cities/' + cities_file
         if not os.path.isfile(cities_filename):
             continue
-        cities = []
+        cities: list[str] = []
         try:
             with open(cities_filename, 'r', encoding='utf-8') as fp_cities:
                 cities = fp_cities.read().split('\n')
@@ -202,7 +202,7 @@ def update_hashtag_categories(base_dir: str) -> None:
               category_list_filename)
         return
-    category_list = []
+    category_list: list[str] = []
     for category_str, _ in hashtag_categories.items():
         category_list.append(category_str)
     category_list.sort()


@@ -168,7 +168,7 @@ def parse_nogo_string(nogo_line: str) -> []:
     pts = polygon_str.split(',')
     if len(pts) <= 4:
         return []
-    polygon = []
+    polygon: list[list] = []
     for index in range(int(len(pts)/2)):
         if index*2 + 1 >= len(pts):
             break
@@ -224,7 +224,7 @@ def spoof_geolocation(base_dir: str,
         return (default_latitude, default_longitude,
                 default_latdirection, default_longdirection,
                 "", "", 0)
-    cities = []
+    cities: list[str] = []
     try:
         with open(locations_filename, 'r', encoding='utf-8') as fp_loc:
             cities = fp_loc.readlines()
@@ -236,7 +236,7 @@ def spoof_geolocation(base_dir: str,
         nogo = nogo_list
     else:
         if os.path.isfile(nogo_filename):
-            nogo_list = []
+            nogo_list: list[str] = []
             try:
                 with open(nogo_filename, 'r', encoding='utf-8') as fp_nogo:
                     nogo_list = fp_nogo.readlines()


@@ -295,7 +295,7 @@ def dangerous_css(filename: str, allow_local_network_access: bool) -> bool:
 def switch_words(base_dir: str, nickname: str, domain: str, content: str,
-                 rules: [] = []) -> str:
+                 rules: list[str] = []) -> str:
     """Performs word replacements. eg. Trump -> The Orange Menace
     """
     if is_pgp_encrypted(content) or contains_pgp_public_key(content):
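
Here the fix is to the annotation itself: the old rules: [] = [] annotated the parameter with an empty list literal, which is a value rather than a type; list[str] is what checkers actually understand. The default is still a mutable [], evaluated once at definition time and shared across calls, which is harmless as long as the function only reads it. The conventional defensive form, as a hypothetical sketch (invented function name and rule format, not Epicyon code):

    # Use Optional[list[str]] on Python < 3.10; '|' unions need 3.10+.
    def apply_rules(content: str, rules: list[str] | None = None) -> str:
        if rules is None:
            rules = []  # a fresh list per call, never shared between calls
        for rule in rules:
            old, new = rule.split(' -> ')   # e.g. 'Trump -> The Orange Menace'
            content = content.replace(old, new)
        return content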
@@ -409,7 +409,7 @@ def _update_common_emoji(base_dir: str, emoji_content: str) -> None:
     except OSError:
         print('EX: unable to load common emoji file')
     if common_emoji:
-        new_common_emoji = []
+        new_common_emoji: list[str] = []
         emoji_found = False
         for line in common_emoji:
             if ' ' + emoji_content in line:
@@ -1115,7 +1115,7 @@ def remove_long_words(content: str, max_word_length: int,
     content = content.replace('<p></p>', '<p> </p>')
     words = content.split(' ')
     if not long_words_list:
-        long_words_list = []
+        long_words_list: list[str] = []
     for word_str in words:
         if len(word_str) > max_word_length:
             if word_str not in long_words_list:
@@ -1341,7 +1341,7 @@ def load_dogwhistles(filename: str) -> {}:
     """
     if not os.path.isfile(filename):
         return {}
-    dogwhistle_lines = []
+    dogwhistle_lines: list[str] = []
     try:
         with open(filename, 'r', encoding='utf-8') as fp_dogwhistles:
             dogwhistle_lines = fp_dogwhistles.readlines()
@@ -1398,7 +1398,7 @@ def add_html_tags(base_dir: str, http_prefix: str,
     words = _get_simplified_content(content).split(' ')
     # remove . for words which are not mentions
-    new_words = []
+    new_words: list[str] = []
     for _, word_str in enumerate(words):
         if word_str.endswith('.'):
             if not word_str.startswith('@'):
@@ -1423,7 +1423,7 @@ def add_html_tags(base_dir: str, http_prefix: str,
     petnames = None
     if '@' in words:
         if os.path.isfile(following_filename):
-            following = []
+            following: list[str] = []
             try:
                 with open(following_filename, 'r',
                           encoding='utf-8') as fp_foll:
@@ -1437,7 +1437,7 @@ def add_html_tags(base_dir: str, http_prefix: str,
                     petnames.append(pet + '\n')
     # extract mentions and tags from words
-    long_words_list = []
+    long_words_list: list[str] = []
     prev_word_str = ''
     auto_tags_list = _load_auto_tags(base_dir, nickname, domain)
     append_tags = []
@@ -1539,7 +1539,7 @@ def _string_starts_with_url_prefix(text: str) -> bool:
 def get_mentions_from_html(html_text: str, match_str: str) -> []:
     """Extracts mentioned actors from the given html content string
     """
-    mentions = []
+    mentions: list[str] = []
     if match_str not in html_text:
         return mentions
     mentions_list = html_text.split(match_str)
@@ -2080,14 +2080,14 @@ def content_diff(content: str, prev_content: str) -> str:
     """
     cdiff = difflib.Differ()
     text1_lines = content.splitlines()
-    text1_sentences = []
+    text1_sentences: list[str] = []
     for line in text1_lines:
         sentences = line.split('.')
         for sentence in sentences:
             text1_sentences.append(sentence.strip())
     text2_lines = prev_content.splitlines()
-    text2_sentences = []
+    text2_sentences: list[str] = []
     for line in text2_lines:
         sentences = line.split('.')
         for sentence in sentences:
@@ -2128,7 +2128,7 @@ def create_edits_html(edits_json: {}, post_json_object: {},
     if 'content' not in post_json_object['object']:
         if 'contentMap' not in post_json_object['object']:
             return ''
-    edit_dates_list = []
+    edit_dates_list: list[str] = []
     for modified, _ in edits_json.items():
         edit_dates_list.append(modified)
     edit_dates_list.sort(reverse=True)
@@ -2243,7 +2243,7 @@ def add_name_emojis_to_tags(base_dir: str, http_prefix: str,
     # get emojis from the actor name
     words = name.split(' ')
-    emojis = []
+    emojis: list[str] = []
     for wrd in words:
         if wrd.startswith(':') and wrd.endswith(':'):
             if wrd not in emojis:
@@ -2251,7 +2251,7 @@ def add_name_emojis_to_tags(base_dir: str, http_prefix: str,
     if not emojis:
         return
-    actor_tags = []
+    actor_tags: list[dict] = []
     if actor_json.get('tag'):
         actor_tags = actor_json['tag']


@@ -135,7 +135,7 @@ def _get_replies_to_post(post_json_object: {},
                          mitm_servers: []) -> []:
     """Returns a list of reply posts to the given post as json
     """
-    result = []
+    result: list[dict] = []
     post_obj = post_json_object
     if has_object_dict(post_json_object):
         post_obj = post_json_object['object']
@@ -174,7 +174,7 @@ def _get_replies_to_post(post_json_object: {},
     if not replies_collection['first'].get('next'):
         return result
-    items_list = []
+    items_list: list[dict] = []
     if replies_collection['first'].get('items'):
         items_list = replies_collection['first']['items']
     if not items_list:
@@ -303,7 +303,7 @@ def download_conversation_posts(authorized: bool, session,
     as_header = {
         'Accept': 'application/ld+json; profile="' + profile_str + '"'
     }
-    conversation_view = []
+    conversation_view: list[dict] = []
     signing_priv_key_pem = get_instance_actor_key(base_dir, domain)
     post_id = remove_id_ending(post_id)
     post_filename = \
@@ -326,7 +326,7 @@ def download_conversation_posts(authorized: bool, session,
                               post_json_object)
     # get any replies
-    replies_to_post = []
+    replies_to_post: list[dict] = []
     if get_json_valid(post_json_object):
         replies_to_post = \
             _get_replies_to_post(post_json_object,
@@ -335,7 +335,7 @@ def download_conversation_posts(authorized: bool, session,
                                  http_prefix, base_dir, nickname,
                                  domain, 0, [], mitm_servers)
-    ids = []
+    ids: list[str] = []
     while get_json_valid(post_json_object):
         if not isinstance(post_json_object, dict):
             break


@@ -45,7 +45,7 @@ def update_known_crawlers(ua_str: str,
     if curr_time - last_known_crawler >= 30:
         # remove any old observations
-        remove_crawlers = []
+        remove_crawlers: list[str] = []
         for uagent, item in known_crawlers.items():
             if curr_time - item['lastseen'] >= 60 * 60 * 24 * 30:
                 remove_crawlers.append(uagent)
@@ -72,7 +72,7 @@ def load_known_web_bots(base_dir: str) -> []:
               known_bots_filename)
     if not crawlers_str:
         return []
-    known_bots = []
+    known_bots: list[str] = []
     crawlers_list = crawlers_str.split('\n')
     for crawler in crawlers_list:
         if not crawler:


@@ -124,7 +124,7 @@ def add_cw_from_lists(post_json_object: {}, cw_lists: {}, translate: {},
     if not content:
         return
-    post_tags = []
+    post_tags: list[dict] = []
     if post_json_object['object'].get('tag'):
         if isinstance(post_json_object['object']['tag'], list):
             post_tags = post_json_object['object']['tag']


@@ -66,7 +66,7 @@ def set_cwtch_address(actor_json: {}, cwtch_address: str) -> None:
         not_cwtch_address = True

     if not actor_json.get('attachment'):
-        actor_json['attachment'] = []
+        actor_json['attachment']: list[dict] = []
     # remove any existing value
     property_found = None


@@ -321,7 +321,7 @@ class EpicyonServer(ThreadingHTTPServer):
     clacks = None
     public_replies_unlisted = False
     dogwhistles = {}
-    preferred_podcast_formats = []
+    preferred_podcast_formats: list[str] = []
     bold_reading = {}
     hide_follows = {}
     account_timezone = None
@@ -404,8 +404,8 @@ class EpicyonServer(ThreadingHTTPServer):
     domain_full = ''
     http_prefix = 'https'
     debug = False
-    federation_list = []
-    shared_items_federated_domains = []
+    federation_list: list[str] = []
+    shared_items_federated_domains: list[str] = []
     base_dir = ''
     instance_id = ''
     person_cache = {}
@@ -420,7 +420,7 @@ class EpicyonServer(ThreadingHTTPServer):
     getreq_busy = False
     postreq_busy = False
     received_message = False
-    inbox_queue = []
+    inbox_queue: list[dict] = []
     send_threads = None
     postLog = []
     max_queue_length = 64
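
These hunks differ from the rest of the commit: the targets are class attributes of EpicyonServer rather than locals. Unlike the subscription targets seen earlier, class-level annotated assignments are recorded in the class's __annotations__ and double as declarations for type checkers. A small illustration with a stand-in class, not the real EpicyonServer:

    class Server:
        # annotated class attributes: stored in Server.__annotations__
        inbox_queue: list[dict] = []
        debug: bool = False

    print(Server.__annotations__)
    # {'inbox_queue': list[dict], 'debug': <class 'bool'>}

One caveat worth knowing: a mutable class attribute such as inbox_queue = [] is shared by every instance until an instance reassigns it.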


@@ -3680,7 +3680,7 @@ def _individual_follow_as_html(signing_priv_key_pem: str,
                                debug: bool,
                                system_language: str,
                                mitm_servers: [],
-                               buttons: list = []) -> str:
+                               buttons: list[str] = []) -> str:
     """An individual follow entry on the profile screen
     """
     follow_url_nickname = get_nickname_from_actor(follow_url)