epicyon/utils.py

3291 lines
110 KiB
Python
Raw Normal View History

2020-04-04 13:44:49 +00:00
__filename__ = "utils.py"
__author__ = "Bob Mottram"
__license__ = "AGPL3+"
2021-01-26 10:07:42 +00:00
__version__ = "1.2.0"
2020-04-04 13:44:49 +00:00
__maintainer__ = "Bob Mottram"
2021-09-10 16:14:50 +00:00
__email__ = "bob@libreserver.org"
2020-04-04 13:44:49 +00:00
__status__ = "Production"
2021-06-26 11:16:41 +00:00
__module_group__ = "Core"
2019-07-02 09:25:29 +00:00
import os
import re
2019-10-11 18:03:58 +00:00
import time
2019-09-29 18:48:34 +00:00
import shutil
2019-07-02 09:25:29 +00:00
import datetime
2019-11-23 10:20:30 +00:00
import json
import idna
2021-03-18 17:27:46 +00:00
import locale
2020-06-06 18:16:16 +00:00
from pprint import pprint
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
2021-12-27 16:18:52 +00:00
from followingCalendar import add_person_to_calendar
2021-02-11 10:33:56 +00:00
# posts containing these strings will always get screened out,
# both incoming and outgoing.
# Could include dubious clacks or admin dogwhistles
# NOTE(review): several entries below render as empty strings in this view —
# presumably they are non-printing or mis-encoded unicode glyphs from the
# upstream file. If any entry really were the empty string '' then
# contains_invalid_chars() would match every string; confirm the actual
# glyphs against the upstream repository.
INVALID_CHARACTERS = (
    '', '', '', '', '', '', 'ϟϟ', '🏳️‍🌈🚫', '⚡⚡'
)
2021-12-26 10:19:59 +00:00
def local_actor_url(http_prefix: str, nickname: str, domain_full: str) -> str:
    """Returns the url for an actor on this instance
    """
    return f'{http_prefix}://{domain_full}/users/{nickname}'
2021-08-14 11:13:39 +00:00
2021-12-26 10:29:52 +00:00
def get_actor_languages_list(actor_json: {}) -> []:
    """Returns a list containing languages used by the given actor

    Looks for a PropertyValue attachment whose name starts with
    'languages'. The value may be a list or a delimited string.
    """
    if not actor_json.get('attachment'):
        return []
    for tag in actor_json['attachment']:
        if not tag.get('name'):
            continue
        if not tag['name'].lower().startswith('languages'):
            continue
        if not tag.get('type') or not tag.get('value'):
            continue
        if tag['type'] != 'PropertyValue':
            continue
        value = tag['value']
        if isinstance(value, list):
            # sort in place, as the original list object is returned
            value.sort()
            return value
        if isinstance(value, str):
            # split on the first recognized separator
            pieces = []
            for sep in (',', ';', '/', '+', ' '):
                if sep in value:
                    pieces = value.split(sep)
                    break
            languages = []
            for lang in pieces:
                lang = lang.strip()
                if lang not in languages:
                    languages.append(lang)
            languages.sort()
            return languages
    return []
2021-12-26 10:50:49 +00:00
def get_content_from_post(post_json_object: {}, system_language: str,
                          languages_understood: []) -> str:
    """Returns the content from the post in the given language
    including searching for a matching entry within contentMap

    Note: when a contentMap exists but contains no matching language,
    an empty string is returned rather than falling back to 'content'.
    """
    post_obj = post_json_object
    if has_object_dict(post_json_object):
        post_obj = post_json_object['object']
    if not post_obj.get('content'):
        return ''
    content = ''
    if post_obj.get('contentMap'):
        cmap = post_obj['contentMap']
        if isinstance(cmap, dict):
            if cmap.get(system_language):
                if isinstance(cmap[system_language], str):
                    return cmap[system_language]
            else:
                # is there a contentMap entry for one of
                # the understood languages?
                for lang in languages_understood:
                    if cmap.get(lang):
                        return cmap[lang]
    else:
        if isinstance(post_obj['content'], str):
            content = post_obj['content']
    return content
2021-12-26 11:29:40 +00:00
def get_base_content_from_post(post_json_object: {},
                               system_language: str) -> str:
    """Returns the raw content from the post, ignoring contentMap
    """
    post_obj = post_json_object
    if has_object_dict(post_json_object):
        post_obj = post_json_object['object']
    content = post_obj.get('content')
    if not content:
        return ''
    return content
2021-07-19 19:40:04 +00:00
2021-12-26 12:02:29 +00:00
def acct_dir(base_dir: str, nickname: str, domain: str) -> str:
    """Returns the directory for the given account handle
    """
    return f'{base_dir}/accounts/{nickname}@{domain}'
2021-07-13 21:59:53 +00:00
2021-12-26 12:07:40 +00:00
def is_featured_writer(base_dir: str, nickname: str, domain: str) -> bool:
    """Is the given account a featured writer, appearing in the features
    timeline on news instances?

    An account is featured unless a .nofeatures marker file exists.
    """
    marker = acct_dir(base_dir, nickname, domain) + '/.nofeatures'
    return not os.path.isfile(marker)
2021-02-13 11:37:02 +00:00
2021-12-26 12:10:21 +00:00
def refresh_newswire(base_dir: str):
    """Causes the newswire to be updated after a change to user accounts

    Creates a marker file which the newswire daemon watches for.
    Does nothing if the marker already exists.
    """
    refresh_newswire_filename = base_dir + '/accounts/.refresh_newswire'
    if os.path.isfile(refresh_newswire_filename):
        return
    # handle write failure rather than propagating, consistent with
    # the OSError handling used elsewhere in this module
    try:
        with open(refresh_newswire_filename, 'w+') as refresh_file:
            refresh_file.write('\n')
    except OSError:
        print('EX: refresh_newswire unable to write ' +
              refresh_newswire_filename)
2021-12-26 12:13:46 +00:00
def get_sha_256(msg: bytes):
    """Returns a SHA256 hash digest (bytes) of the given message bytes

    Note: annotation corrected from str — hashes.Hash.update requires
    a bytes-like object and raises TypeError for str.
    """
    digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
    digest.update(msg)
    return digest.finalize()
2021-09-08 10:05:45 +00:00
2021-12-26 12:13:46 +00:00
def get_sha_512(msg: bytes):
    """Returns a SHA512 hash digest (bytes) of the given message bytes

    Note: annotation corrected from str — hashes.Hash.update requires
    a bytes-like object and raises TypeError for str.
    """
    digest = hashes.Hash(hashes.SHA512(), backend=default_backend())
    digest.update(msg)
    return digest.finalize()
2019-07-02 09:25:29 +00:00
2020-04-04 13:44:49 +00:00
2021-12-26 12:16:36 +00:00
def _local_network_host(host: str) -> bool:
"""Returns true if the given host is on the local network
"""
2021-01-25 11:51:42 +00:00
if host.startswith('localhost') or \
host.startswith('192.') or \
host.startswith('127.') or \
host.startswith('10.'):
return True
return False
2021-12-26 12:21:31 +00:00
def decoded_host(host: str) -> str:
    """Convert hostname to internationalized domain
    https://en.wikipedia.org/wiki/Internationalized_domain_name
    """
    if ':' in host:
        # hosts with an explicit port (eg. mydomain:8000) are not decoded
        return host
    if _local_network_host(host):
        return host
    if host.endswith(('.onion', '.i2p')):
        return host
    return idna.decode(host)
2021-12-26 12:16:36 +00:00
def get_locked_account(actor_json: {}) -> bool:
    """Returns whether the given account requires follower approval
    """
    # only an explicit boolean True counts as locked
    return actor_json.get('manuallyApprovesFollowers') is True
2021-12-26 12:31:47 +00:00
def has_users_path(path_str: str) -> bool:
    """Whether there is a /users/ path (or equivalent) in the given string

    Falls back to accepting bare actor urls of the form
    scheme://domain/nickname where the nickname segment contains
    no further slashes or dots.
    """
    if any(users_str in path_str for users_str in get_user_paths()):
        return True
    if '://' not in path_str:
        return False
    domain = path_str.split('://')[1]
    if '/' in domain:
        domain = domain.split('/')[0]
    prefix = '://' + domain + '/'
    if prefix not in path_str:
        return False
    nickname = path_str.split(prefix)[1]
    if '/' in nickname or '.' in nickname:
        return False
    return True
2021-12-26 12:37:53 +00:00
def valid_post_date(published: str, max_age_days: int, debug: bool) -> bool:
    """Returns true if the published date is recent and is not in the future
    """
    epoch = datetime.datetime(1970, 1, 1)
    now_days = (datetime.datetime.utcnow() - epoch).days
    try:
        post_time = \
            datetime.datetime.strptime(published, "%Y-%m-%dT%H:%M:%SZ")
    except BaseException:
        if debug:
            print('EX: valid_post_date invalid published date ' +
                  str(published))
        return False
    post_days = (post_time - epoch).days
    if post_days > now_days:
        if debug:
            print("Inbox post has a published date in the future!")
        return False
    if now_days - post_days >= max_age_days:
        if debug:
            print("Inbox post is not recent enough")
        return False
    return True
2021-12-26 12:45:03 +00:00
def get_full_domain(domain: str, port: int) -> str:
    """Returns the full domain name, including port number

    Standard ports (80, 443) and already-ported domains are
    returned unchanged.
    """
    if not port or ':' in domain or port in (80, 443):
        return domain
    return domain + ':' + str(port)
2021-12-26 12:54:51 +00:00
def is_dormant(base_dir: str, nickname: str, domain: str, actor: str,
               dormant_months: int) -> bool:
    """Is the given followed actor dormant, from the standpoint
    of the given account

    Reads the last-seen days-since-epoch recorded for the actor and
    compares against the dormancy threshold (months of ~30 days).
    """
    last_seen_filename = acct_dir(base_dir, nickname, domain) + \
        '/lastseen/' + actor.replace('/', '#') + '.txt'
    if not os.path.isfile(last_seen_filename):
        return False
    try:
        with open(last_seen_filename, 'r') as fp_seen:
            days_str = fp_seen.read()
    except OSError:
        print('EX: failed to read last seen ' + last_seen_filename)
        return False
    if not days_str:
        return False
    seen_days = int(days_str)
    curr_days = \
        (datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).days
    months_since_seen = int((curr_days - seen_days) / 30)
    return months_since_seen >= dormant_months
2021-12-26 13:27:57 +00:00
def is_editor(base_dir: str, nickname: str) -> bool:
    """Returns true if the given nickname is an editor

    If no editors file exists (or it is empty) the admin account
    is treated as the editor.
    """
    editors_file = base_dir + '/accounts/editors.txt'

    def _is_admin() -> bool:
        # fallback: the instance admin counts as an editor
        admin = get_config_param(base_dir, 'admin')
        if admin and admin == nickname:
            return True
        return False

    if not os.path.isfile(editors_file):
        return _is_admin()
    with open(editors_file, 'r') as editors:
        lines = editors.readlines()
    if not lines:
        if _is_admin():
            return True
    for line in lines:
        if line.strip('\n').strip('\r') == nickname:
            return True
    return False
2021-12-26 14:17:13 +00:00
def is_artist(base_dir: str, nickname: str) -> bool:
    """Returns true if the given nickname is an artist

    If no artists file exists (or it is empty) the admin account
    is treated as the artist.
    """
    artists_file = base_dir + '/accounts/artists.txt'

    def _is_admin() -> bool:
        # fallback: the instance admin counts as an artist
        admin = get_config_param(base_dir, 'admin')
        if admin and admin == nickname:
            return True
        return False

    if not os.path.isfile(artists_file):
        return _is_admin()
    with open(artists_file, 'r') as artists:
        lines = artists.readlines()
    if not lines:
        if _is_admin():
            return True
    for line in lines:
        if line.strip('\n').strip('\r') == nickname:
            return True
    return False
2021-12-26 14:20:09 +00:00
def get_video_extensions() -> []:
    """Returns the possible video file extensions
    """
    video_ext = ('mp4', 'webm', 'ogv')
    return video_ext
2021-12-26 14:24:03 +00:00
def get_audio_extensions() -> []:
    """Returns the possible audio file extensions
    """
    audio_ext = ('mp3', 'ogg', 'flac')
    return audio_ext
2021-12-26 14:26:16 +00:00
def get_image_extensions() -> []:
    """Returns the possible image file extensions
    """
    image_ext = ('png', 'jpg', 'jpeg', 'gif', 'webp', 'avif', 'svg', 'ico')
    return image_ext
2020-11-21 11:21:05 +00:00
2021-12-26 14:42:21 +00:00
def get_image_mime_type(image_filename: str) -> str:
    """Returns the mime type for the given image filename

    Defaults to image/png when the extension is unrecognized.
    """
    ext_to_mime = {
        'png': 'png',
        'jpg': 'jpeg',
        'gif': 'gif',
        'avif': 'avif',
        'svg': 'svg+xml',
        'webp': 'webp',
        'ico': 'x-icon'
    }
    return next(('image/' + mime
                 for ext, mime in ext_to_mime.items()
                 if image_filename.endswith('.' + ext)), 'image/png')
2021-12-27 16:02:54 +00:00
def get_image_extension_from_mime_type(content_type: str) -> str:
    """Returns the image extension from a mime type, such as image/jpeg

    Defaults to png when the mime type is unrecognized.
    """
    mime_to_ext = {
        'png': 'png',
        'jpeg': 'jpg',
        'gif': 'gif',
        'svg+xml': 'svg',
        'webp': 'webp',
        'avif': 'avif',
        'x-icon': 'ico'
    }
    return next((ext
                 for mime, ext in mime_to_ext.items()
                 if content_type.endswith(mime)), 'png')
2021-12-26 14:39:49 +00:00
def get_media_extensions() -> []:
    """Returns all possible media file extensions
    (images, then video, then audio)
    """
    return get_image_extensions() + \
        get_video_extensions() + \
        get_audio_extensions()
2020-11-21 11:54:29 +00:00
2021-12-26 15:44:28 +00:00
def get_image_formats() -> str:
    """Returns a string of permissable image formats
    used when selecting an image for a new post,
    eg. '.png, .jpg, ...'
    """
    return ', '.join('.' + ext for ext in get_image_extensions())
2020-11-21 11:21:05 +00:00
2021-12-27 15:58:46 +00:00
def is_image_file(filename: str) -> bool:
    """Is the given filename an image?
    """
    suffixes = tuple('.' + ext for ext in get_image_extensions())
    return filename.endswith(suffixes)
2021-12-27 15:56:15 +00:00
def get_media_formats() -> str:
    """Returns a string of permissable media formats
    used when selecting an attachment for a new post,
    eg. '.png, .jpg, ...'
    """
    return ', '.join('.' + ext for ext in get_media_extensions())
2020-11-21 11:54:29 +00:00
2021-12-27 15:43:22 +00:00
def remove_html(content: str) -> str:
    """Removes html links from the given content.
    Used to ensure that profile descriptions don't contain dubious content
    """
    # fast path: nothing that looks like markup
    if '<' not in content:
        return content
    removing = False
    # ensure a space before links so adjacent words don't get joined
    content = content.replace('<a href', ' <a href')
    content = content.replace('<q>', '"').replace('</q>', '"')
    # preserve paragraph/line breaks as newlines
    content = content.replace('</p>', '\n\n').replace('<br>', '\n')
    result = ''
    # strip out every <...> tag, character by character
    for char in content:
        if char == '<':
            removing = True
        elif char == '>':
            removing = False
        elif not removing:
            result += char

    # NOTE(review): this replace renders as a space-for-space no-op in this
    # view — presumably the original collapses double spaces or &nbsp;
    # entities; confirm against the upstream file
    plain_text = result.replace(' ', ' ')
    # insert spaces after full stops
    str_len = len(plain_text)
    result = ''
    for i in range(str_len):
        result += plain_text[i]
        # only ASCII A-Z triggers the inserted space after a full stop
        if plain_text[i] == '.' and i < str_len - 1:
            if plain_text[i + 1] >= 'A' and plain_text[i + 1] <= 'Z':
                result += ' '
    # NOTE(review): same apparent no-op replace as above — confirm upstream
    result = result.replace(' ', ' ').strip()
    return result
2021-12-27 15:52:08 +00:00
def first_paragraph_from_string(content: str) -> str:
    """Get the first paragraph from a blog post
    to be used as a summary in the newswire feed
    """
    if '<p>' not in content or '</p>' not in content:
        return remove_html(content)
    first_para = content.split('<p>')[1]
    if '</p>' in first_para:
        first_para = first_para.split('</p>')[0]
    return remove_html(first_para)
2020-11-08 10:52:07 +00:00
2021-12-27 15:41:04 +00:00
def is_system_account(nickname: str) -> bool:
    """Returns true if the given nickname is a system account
    ('news' or 'inbox')
    """
    return nickname in ('news', 'inbox')
2021-12-26 14:37:28 +00:00
def _create_config(base_dir: str) -> None:
    """Creates an empty configuration file if none exists
    """
    config_filename = base_dir + '/config.json'
    if not os.path.isfile(config_filename):
        save_json({}, config_filename)
2020-10-06 08:58:44 +00:00
2021-12-27 20:38:02 +00:00
def set_config_param(base_dir: str, variable_name: str,
                     variable_value) -> None:
    """Sets a configuration value

    variable_name is converted to camelCase before being stored
    in base_dir/config.json.
    """
    _create_config(base_dir)
    config_filename = base_dir + '/config.json'
    config_json = {}
    if os.path.isfile(config_filename):
        # load_json returns None on read/parse failure; fall back to an
        # empty config rather than crashing on the subscript below
        config_json = load_json(config_filename) or {}
    variable_name = _convert_to_camel_case(variable_name)
    config_json[variable_name] = variable_value
    save_json(config_json, config_filename)
2020-10-06 08:58:44 +00:00
2021-12-27 15:41:04 +00:00
def get_config_param(base_dir: str, variable_name: str):
    """Gets a configuration value, or None if not set
    """
    _create_config(base_dir)
    config_json = load_json(base_dir + '/config.json')
    if not config_json:
        return None
    variable_name = _convert_to_camel_case(variable_name)
    if variable_name in config_json:
        return config_json[variable_name]
    return None
2021-12-27 15:37:31 +00:00
def is_suspended(base_dir: str, nickname: str) -> bool:
    """Returns true if the given nickname is suspended

    The admin account can never be suspended.
    """
    admin_nickname = get_config_param(base_dir, 'admin')
    if not admin_nickname:
        return False
    if nickname == admin_nickname:
        return False
    suspended_filename = base_dir + '/accounts/suspended.txt'
    if not os.path.isfile(suspended_filename):
        return False
    with open(suspended_filename, 'r') as susp_file:
        for line in susp_file:
            if line.strip('\n').strip('\r') == nickname:
                return True
    return False
2021-12-27 13:58:17 +00:00
def get_followers_list(base_dir: str,
                       nickname: str, domain: str,
                       follow_file='following.txt') -> []:
    """Returns a list of followers for the given account

    Each returned entry has surrounding whitespace stripped.
    """
    filename = acct_dir(base_dir, nickname, domain) + '/' + follow_file
    if not os.path.isfile(filename):
        return []
    # previously used an index loop plus an unreachable trailing
    # 'return []' after the with-block; a comprehension is equivalent
    with open(filename, 'r') as foll_file:
        return [line.strip() for line in foll_file.readlines()]
2021-12-27 11:31:04 +00:00
def get_followers_of_person(base_dir: str,
                            nickname: str, domain: str,
                            follow_file='following.txt') -> []:
    """Returns a list containing the followers of the given person
    Used by the shared inbox to know who to send incoming mail to
    """
    followers = []
    domain = remove_domain_port(domain)
    handle = nickname + '@' + domain
    if not os.path.isdir(base_dir + '/accounts/' + handle):
        return followers
    for subdir, dirs, _ in os.walk(base_dir + '/accounts'):
        for account in dirs:
            filename = os.path.join(subdir, account) + '/' + follow_file
            # skip the person themselves and system accounts
            if account == handle or \
               account.startswith('inbox@') or \
               account.startswith('news@'):
                continue
            if not os.path.isfile(filename):
                continue
            with open(filename, 'r') as followingfile:
                for following_handle in followingfile:
                    following_handle2 = following_handle.replace('\n', '')
                    following_handle2 = following_handle2.replace('\r', '')
                    if following_handle2 == handle:
                        if account not in followers:
                            followers.append(account)
                        # found in this account's list; stop scanning it
                        break
        # only examine the top level of the accounts directory
        break
    return followers
2021-12-27 11:31:04 +00:00
def remove_id_ending(id_str: str) -> str:
    """Removes endings such as /activity and /undo
    and any trailing #Create fragment
    """
    for ending in ('/activity', '/undo', '/event', '/replies'):
        if id_str.endswith(ending):
            id_str = id_str[:-len(ending)]
            break
    if id_str.endswith('#Create'):
        id_str = id_str.split('#Create')[0]
    return id_str
2020-08-23 11:13:35 +00:00
2021-12-27 17:16:57 +00:00
def remove_hash_from_post_id(post_id: str) -> str:
    """Removes any hash fragment from a post id
    """
    # split is a no-op when there is no '#'
    return post_id.split('#')[0]
2021-12-06 12:42:05 +00:00
2021-12-27 17:20:01 +00:00
def get_protocol_prefixes() -> []:
    """Returns a list of valid protocol prefixes
    """
    prefixes = ('https://', 'http://', 'ftp://',
                'dat://', 'i2p://', 'gnunet://',
                'hyper://', 'gemini://', 'gopher://')
    return prefixes
2021-12-27 17:32:34 +00:00
def get_link_prefixes() -> []:
    """Returns a list of valid web link prefixes
    """
    prefixes = ('https://', 'http://', 'ftp://',
                'dat://', 'i2p://', 'gnunet://', 'payto://',
                'hyper://', 'gemini://', 'gopher://', 'briar:')
    return prefixes
2021-12-27 20:43:15 +00:00
def remove_avatar_from_cache(base_dir: str, actor_str: str) -> None:
    """Removes any existing avatar entries from the cache
    This avoids duplicate entries with differing extensions
    """
    for extension in get_image_extensions():
        cached = \
            base_dir + '/cache/avatars/' + actor_str + '.' + extension
        if not os.path.isfile(cached):
            continue
        try:
            os.remove(cached)
        except OSError:
            print('EX: remove_avatar_from_cache ' +
                  'unable to delete cached avatar ' + str(cached))
2020-04-04 13:44:49 +00:00
2021-12-26 15:13:34 +00:00
def save_json(json_object: {}, filename: str) -> bool:
    """Saves json to a file

    Retries up to five times on OSError, sleeping one second
    between attempts. Returns True on success.
    """
    for attempt in range(5):
        try:
            with open(filename, 'w+') as json_file:
                json_file.write(json.dumps(json_object))
            return True
        except OSError:
            print('EX: save_json ' + str(attempt))
            time.sleep(1)
    return False
2020-04-04 13:44:49 +00:00
2021-12-26 15:28:08 +00:00
def load_json(filename: str, delay_sec: int = 2, max_tries: int = 5) -> {}:
    """Makes a few attempts to load a json formatted file

    Returns the parsed object, or None if every attempt failed.
    Sleeps delay_sec between attempts.
    """
    json_object = None
    tries = 0
    while tries < max_tries:
        try:
            with open(filename, 'r') as json_file:
                json_object = json.loads(json_file.read())
            break
        # previously 'except BaseException', which also swallowed
        # KeyboardInterrupt/SystemExit; JSONDecodeError is a ValueError
        except (OSError, ValueError):
            print('EX: load_json exception ' + str(filename))
            if delay_sec > 0:
                time.sleep(delay_sec)
            tries += 1
    return json_object
2019-10-22 11:55:06 +00:00
2020-04-04 13:44:49 +00:00
2021-12-26 15:13:34 +00:00
def load_json_onionify(filename: str, domain: str, onion_domain: str,
                       delay_sec: int = 2) -> {}:
    """Makes a few attempts to load a json formatted file
    This also converts the domain name to the onion domain
    (and https urls to http, since onion services are plain http)
    """
    json_object = None
    tries = 0
    while tries < 5:
        try:
            with open(filename, 'r') as json_file:
                data = json_file.read()
                if data:
                    data = data.replace(domain, onion_domain)
                    data = data.replace('https:', 'http:')
                    # removed leftover debug print which dumped the
                    # entire file contents to the log on every load
                json_object = json.loads(data)
                break
        # previously 'except BaseException', which also swallowed
        # KeyboardInterrupt/SystemExit; JSONDecodeError is a ValueError
        except (OSError, ValueError):
            print('EX: load_json_onionify exception ' + str(filename))
            if delay_sec > 0:
                time.sleep(delay_sec)
            tries += 1
    return json_object
2020-03-02 14:35:44 +00:00
2020-04-04 13:44:49 +00:00
2021-12-27 17:46:27 +00:00
def get_status_number(published_str: str = None) -> (str, str):
    """Returns the status number and published date

    The status number is a snowflake-style id: milliseconds since
    epoch shifted left 16 bits, plus leftover microseconds as a
    sequence number. See https://github.com/tootsuite/mastodon/blob/
    995f8b389a66ab76ec92d9a240de376f1fc13a38/lib/mastodon/snowflake.rb
    """
    if published_str:
        curr_time = \
            datetime.datetime.strptime(published_str, '%Y-%m-%dT%H:%M:%SZ')
    else:
        curr_time = datetime.datetime.utcnow()
    days_since_epoch = (curr_time - datetime.datetime(1970, 1, 1)).days
    # milliseconds since epoch
    msec_since_epoch = \
        ((days_since_epoch * 24 * 60 * 60) +
         (curr_time.hour * 60 * 60) +
         (curr_time.minute * 60) +
         curr_time.second) * 1000 + int(curr_time.microsecond / 1000)
    # use the leftover microseconds as the sequence number
    sequence_id = curr_time.microsecond % 1000
    # shift by 16bits "sequence data"
    status_number = str((msec_since_epoch << 16) + sequence_id)
    published = curr_time.strftime("%Y-%m-%dT%H:%M:%SZ")
    return status_number, published
2020-04-04 13:44:49 +00:00
2019-07-02 09:25:29 +00:00
2021-12-27 17:46:27 +00:00
def evil_incarnate() -> []:
    """Hardcoded blocked domains
    """
    blocked = ('fedilist.com', 'gab.com', 'gabfed.com', 'spinster.xyz',
               'kiwifarms.cc', 'djitter.com')
    return blocked
2020-03-28 10:33:04 +00:00
2021-12-27 17:49:35 +00:00
def is_evil(domain: str) -> bool:
    """ https://www.youtube.com/watch?v=5qw1hcevmdU
    """
    if not isinstance(domain, str):
        print('WARN: Malformed domain ' + str(domain))
        return True
    # if a domain contains any of these strings then it is
    # declaring itself to be hostile
    evil_emporium = (
        'nazi', 'extremis', 'extreemis', 'gendercritic',
        'kiwifarm', 'illegal', 'raplst', 'rapist',
        'antivax', 'plandemic'
    )
    if any(hostile_str in domain for hostile_str in evil_emporium):
        return True
    return any(domain.endswith(evil) for evil in evil_incarnate())
2020-04-04 13:44:49 +00:00
2021-12-27 17:53:41 +00:00
def contains_invalid_chars(json_str: str) -> bool:
    """Does the given json string contain invalid characters?
    """
    return any(bad in json_str for bad in INVALID_CHARACTERS)
2021-12-27 19:33:45 +00:00
def remove_invalid_chars(text: str) -> str:
    """Removes any invalid characters from a string
    """
    for bad in INVALID_CHARACTERS:
        # replace is a no-op when the character is absent
        text = text.replace(bad, '')
    return text
2021-12-27 19:26:54 +00:00
def create_person_dir(nickname: str, domain: str, base_dir: str,
                      dir_name: str) -> str:
    """Create a named subdirectory for a person and return its path
    """
    handle_dir = base_dir + '/accounts/' + nickname + '@' + domain
    if not os.path.isdir(handle_dir):
        os.mkdir(handle_dir)
    box_dir = handle_dir + '/' + dir_name
    if not os.path.isdir(box_dir):
        os.mkdir(box_dir)
    return box_dir
2019-07-04 10:02:56 +00:00
2020-04-04 13:44:49 +00:00
2021-12-27 17:57:27 +00:00
def create_outbox_dir(nickname: str, domain: str, base_dir: str) -> str:
    """Create an outbox directory for a person and return its path
    """
    return create_person_dir(nickname, domain, base_dir, 'outbox')
2019-07-04 10:02:56 +00:00
2020-04-04 13:44:49 +00:00
2021-12-27 18:00:51 +00:00
def create_inbox_queue_dir(nickname: str, domain: str, base_dir: str) -> str:
    """Creates and returns the inbox queue directory for a person
    """
    queue_dir = create_person_dir(nickname, domain, base_dir, 'queue')
    return queue_dir
2020-04-04 13:44:49 +00:00
2019-07-02 10:39:55 +00:00
2021-12-27 19:26:54 +00:00
def domain_permitted(domain: str, federation_list: []) -> bool:
    """Is the given domain permitted according to the federation list?
    An empty federation list permits any domain.
    """
    if len(federation_list) == 0:
        return True
    return remove_domain_port(domain) in federation_list
2020-04-04 13:44:49 +00:00
2021-12-27 20:47:05 +00:00
def url_permitted(url: str, federation_list: []):
    """Returns True if the given url is not on a blocked domain
    and falls within the permitted federation.
    An empty federation list permits everything.
    """
    if is_evil(url):
        return False
    if not federation_list:
        return True
    return any(domain in url for domain in federation_list)
2019-07-06 15:17:21 +00:00
2020-04-04 13:44:49 +00:00
2021-12-27 20:43:15 +00:00
def get_local_network_addresses() -> []:
    """Returns the address prefixes used to detect local
    network urls
    """
    local_prefixes = ('localhost', '127.0.', '192.168', '10.0.')
    return local_prefixes
2021-12-27 21:42:08 +00:00
def is_local_network_address(ip_address: str) -> bool:
    """Does the given address begin with a local network prefix?
    """
    return any(ip_address.startswith(prefix)
               for prefix in get_local_network_addresses())
2021-12-27 21:42:08 +00:00
def _is_dangerous_string(content: str, allow_local_network_access: bool,
separators: [], invalid_strings: []) -> bool:
"""Returns true if the given string is dangerous
"""
2021-12-27 21:42:08 +00:00
for separator_style in separators:
start_char = separator_style[0]
end_char = separator_style[1]
if start_char not in content:
continue
2021-12-27 21:42:08 +00:00
if end_char not in content:
2021-05-19 11:29:37 +00:00
continue
2021-12-27 21:42:08 +00:00
content_sections = content.split(start_char)
invalid_partials = ()
2021-12-25 18:54:50 +00:00
if not allow_local_network_access:
2021-12-27 21:42:08 +00:00
invalid_partials = get_local_network_addresses()
for markup in content_sections:
if end_char not in markup:
2021-05-19 11:29:37 +00:00
continue
2021-12-27 21:42:08 +00:00
markup = markup.split(end_char)[0].strip()
for partial_match in invalid_partials:
if partial_match in markup:
return True
2021-05-19 11:29:37 +00:00
if ' ' not in markup:
2021-12-27 21:42:08 +00:00
for bad_str in invalid_strings:
if bad_str in markup:
2021-05-19 11:29:37 +00:00
return True
else:
2021-12-27 21:42:08 +00:00
for bad_str in invalid_strings:
if bad_str + ' ' in markup:
2021-05-19 11:29:37 +00:00
return True
return False
2021-12-27 21:42:08 +00:00
def dangerous_markup(content: str, allow_local_network_access: bool) -> bool:
    """Returns true if the given content contains dangerous html markup
    """
    blocked_tags = ('script', 'noscript', 'code', 'pre',
                    'canvas', 'style', 'abbr',
                    'frame', 'iframe', 'html', 'body',
                    'hr', 'allow-popups', 'allow-scripts')
    return _is_dangerous_string(content, allow_local_network_access,
                                [['<', '>'], ['&lt;', '&gt;']],
                                list(blocked_tags))
2021-12-27 21:44:48 +00:00
def dangerous_svg(content: str, allow_local_network_access: bool) -> bool:
    """Returns true if the given svg file content contains dangerous scripts
    """
    return _is_dangerous_string(content, allow_local_network_access,
                                [['<', '>'], ['&lt;', '&gt;']],
                                ['script'])
2021-12-27 21:59:07 +00:00
def get_display_name(base_dir: str, actor: str, person_cache: {}) -> str:
    """Returns the display name for the given actor, taken from
    the in-memory person cache or from the actor file cached on disk.
    Returns None if the actor is unknown.
    """
    if '/statuses/' in actor:
        actor = actor.split('/statuses/')[0]
    if not person_cache.get(actor):
        return None
    display_name = None
    if person_cache[actor].get('actor'):
        if person_cache[actor]['actor'].get('name'):
            display_name = person_cache[actor]['actor']['name']
    else:
        # Try to obtain from the cached actors
        cached_actor_filename = \
            base_dir + '/cache/actors/' + (actor.replace('/', '#')) + '.json'
        if os.path.isfile(cached_actor_filename):
            actor_json = load_json(cached_actor_filename, 1)
            if actor_json:
                if actor_json.get('name'):
                    display_name = actor_json['name']
    if display_name:
        # don't show a potentially malicious display name
        if dangerous_markup(display_name, False):
            display_name = "*ADVERSARY*"
    return display_name
2019-08-22 12:41:16 +00:00
2020-04-04 13:44:49 +00:00
2021-12-27 22:12:29 +00:00
def _gender_from_string(translate: {}, text: str) -> str:
2021-06-24 19:28:26 +00:00
"""Given some text, does it contain a gender description?
"""
2021-06-24 19:25:39 +00:00
gender = None
2021-07-23 14:32:21 +00:00
if not text:
return None
2021-12-27 22:12:29 +00:00
text_orig = text
2021-06-24 19:25:39 +00:00
text = text.lower()
if translate['He/Him'].lower() in text or \
translate['boy'].lower() in text:
gender = 'He/Him'
elif (translate['She/Her'].lower() in text or
translate['girl'].lower() in text):
gender = 'She/Her'
elif 'him' in text or 'male' in text:
gender = 'He/Him'
elif 'her' in text or 'she' in text or \
'fem' in text or 'woman' in text:
gender = 'She/Her'
2021-12-27 22:12:29 +00:00
elif 'man' in text or 'He' in text_orig:
2021-06-24 19:25:39 +00:00
gender = 'He/Him'
return gender
2021-12-27 22:12:29 +00:00
def get_gender_from_bio(base_dir: str, actor: str, person_cache: {},
                        translate: {}) -> str:
    """Tries to ascertain gender from bio description
    This is for use by text-to-speech for pitch setting
    Returns 'He/Him', 'She/Her' or the default 'They/Them'.
    """
    default_gender = 'They/Them'
    # strip any status suffix so the bare actor url is used as cache key
    if '/statuses/' in actor:
        actor = actor.split('/statuses/')[0]
    if not person_cache.get(actor):
        return default_gender
    bio_found = None
    if translate:
        pronoun_str = translate['pronoun'].lower()
    else:
        # NOTE(review): when translate is falsy the translate['gender']
        # lookup further down will raise KeyError if the actor has
        # attachment tags - confirm callers always pass a populated
        # translate dict
        pronoun_str = 'pronoun'
    actor_json = None
    if person_cache[actor].get('actor'):
        actor_json = person_cache[actor]['actor']
    else:
        # Try to obtain from the cached actors
        cached_actor_filename = \
            base_dir + '/cache/actors/' + (actor.replace('/', '#')) + '.json'
        if os.path.isfile(cached_actor_filename):
            actor_json = load_json(cached_actor_filename, 1)
    if not actor_json:
        return default_gender
    # is gender defined as a profile tag?
    if actor_json.get('attachment'):
        tags_list = actor_json['attachment']
        if isinstance(tags_list, list):
            # look for a gender field name
            for tag in tags_list:
                if not isinstance(tag, dict):
                    continue
                if not tag.get('name') or not tag.get('value'):
                    continue
                if tag['name'].lower() == \
                   translate['gender'].lower():
                    bio_found = tag['value']
                    break
                if tag['name'].lower().startswith(pronoun_str):
                    bio_found = tag['value']
                    break
            # the field name could be anything,
            # just look at the value
            if not bio_found:
                for tag in tags_list:
                    if not isinstance(tag, dict):
                        continue
                    if not tag.get('name') or not tag.get('value'):
                        continue
                    gender = _gender_from_string(translate, tag['value'])
                    if gender:
                        return gender
    # if not then use the bio
    if not bio_found and actor_json.get('summary'):
        bio_found = actor_json['summary']
    if not bio_found:
        return default_gender
    gender = _gender_from_string(translate, bio_found)
    if not gender:
        gender = default_gender
    return gender
2021-12-27 22:19:18 +00:00
def get_nickname_from_actor(actor: str) -> str:
    """Returns the nickname from an actor url,
    or None if no nickname can be extracted
    """
    if actor.startswith('@'):
        actor = actor[1:]
    # known account path styles, e.g. /users/nick or /channel/nick
    for path_style in get_user_paths():
        if path_style not in actor:
            continue
        nickname = actor.split(path_style)[1].replace('@', '')
        if '/' in nickname:
            nickname = nickname.split('/')[0]
        return nickname
    if '/@' in actor:
        # https://domain/@nick
        nickname = actor.split('/@')[1]
        if '/' in nickname:
            nickname = nickname.split('/')[0]
        return nickname
    if '@' in actor:
        # nick@domain
        return actor.split('@')[0]
    if '://' in actor:
        # https://domain/nick
        domain = actor.split('://')[1]
        if '/' in domain:
            domain = domain.split('/')[0]
        if '://' + domain + '/' not in actor:
            return None
        nickname = actor.split('://' + domain + '/')[1]
        if '/' in nickname or '.' in nickname:
            return None
        return nickname
    return None
2019-07-06 15:17:21 +00:00
2020-04-04 13:44:49 +00:00
2021-12-26 12:24:40 +00:00
def get_user_paths() -> []:
    """Returns the url path styles under which user profiles may
    be found, e.g. /users/nickname, /channel/nickname
    """
    known_paths = ('/users/', '/profile/', '/accounts/', '/channel/', '/u/',
                   '/c/', '/video-channels/')
    return known_paths
2021-07-04 22:58:01 +00:00
2021-12-26 17:53:07 +00:00
def get_group_paths() -> []:
    """Returns the url path styles under which groups may be found
    e.g. https://lemmy/c/groupname
    """
    known_paths = ['/c/', '/video-channels/']
    return known_paths
2021-07-30 13:00:23 +00:00
2021-12-27 19:05:25 +00:00
def get_domain_from_actor(actor: str) -> (str, int):
    """Returns the domain name and port number from an actor url.
    The port is None when the domain has no explicit port.
    """
    if actor.startswith('@'):
        actor = actor[1:]
    port = None
    prefixes = get_protocol_prefixes()
    users_paths = get_user_paths()
    # actor url containing a known account path style,
    # e.g. https://domain/users/nick
    for possible_path in users_paths:
        if possible_path in actor:
            domain = actor.split(possible_path)[0]
            for prefix in prefixes:
                domain = domain.replace(prefix, '')
            break
    # NOTE: the checks below deliberately run even when a users path
    # matched above, so '/@' and '@' forms take precedence
    if '/@' in actor:
        # https://domain/@nick
        domain = actor.split('/@')[0]
        for prefix in prefixes:
            domain = domain.replace(prefix, '')
    elif '@' in actor:
        # nick@domain
        domain = actor.split('@')[1].strip()
    else:
        # bare url, e.g. https://domain/some/path
        domain = actor
        for prefix in prefixes:
            domain = domain.replace(prefix, '')
        if '/' in actor:
            domain = domain.split('/')[0]
    if ':' in domain:
        # separate any port number from the domain
        port = get_port_from_domain(domain)
        domain = remove_domain_port(domain)
    return domain, port
2021-12-27 19:26:54 +00:00
def _set_default_pet_name(base_dir: str, nickname: str, domain: str,
                          follow_nickname: str, follow_domain: str) -> None:
    """Sets a default petname
    This helps especially when using onion or i2p address
    """
    domain = remove_domain_port(domain)
    petnames_filename = \
        acct_dir(base_dir, nickname, domain) + '/petnames.txt'
    new_entry = follow_nickname + ' ' + \
        follow_nickname + '@' + follow_domain + '\n'
    if not os.path.isfile(petnames_filename):
        # no petnames lookup file exists yet, so create one
        with open(petnames_filename, 'w+') as petnames_file:
            petnames_file.write(new_entry)
        return
    with open(petnames_filename, 'r') as petnames_file:
        petnames_str = petnames_file.read()
    if petnames_str:
        for existing_entry in petnames_str.split('\n'):
            if existing_entry.startswith(follow_nickname + ' '):
                # petname already exists
                return
    # petname doesn't already exist, so append it
    with open(petnames_filename, 'a+') as petnames_file:
        petnames_file.write(new_entry)
2020-11-23 15:07:55 +00:00
2021-12-27 17:08:19 +00:00
def follow_person(base_dir: str, nickname: str, domain: str,
                  follow_nickname: str, follow_domain: str,
                  federation_list: [], debug: bool,
                  group_account: bool,
                  follow_file: str = 'following.txt') -> bool:
    """Adds a person to the follow list.
    nickname@domain is the account doing the following and
    follow_nickname@follow_domain is the account being followed.
    group_account prefixes the stored handle with '!'.
    Returns True on success or when the follow already exists.
    """
    follow_domain_str_lower = follow_domain.lower().replace('\n', '')
    # the domain being followed must be within the permitted federation
    if not domain_permitted(follow_domain_str_lower,
                            federation_list):
        if debug:
            print('DEBUG: follow of domain ' +
                  follow_domain + ' not permitted')
        return False
    if debug:
        print('DEBUG: follow of domain ' + follow_domain)

    # handle of the account doing the following, without any port number
    if ':' in domain:
        domain_only = remove_domain_port(domain)
        handle = nickname + '@' + domain_only
    else:
        handle = nickname + '@' + domain

    if not os.path.isdir(base_dir + '/accounts/' + handle):
        print('WARN: account for ' + handle + ' does not exist')
        return False

    # handle of the account being followed, without any port number
    if ':' in follow_domain:
        follow_domain_only = remove_domain_port(follow_domain)
        handle_to_follow = follow_nickname + '@' + follow_domain_only
    else:
        handle_to_follow = follow_nickname + '@' + follow_domain

    if group_account:
        handle_to_follow = '!' + handle_to_follow

    # was this person previously unfollowed?
    unfollowed_filename = base_dir + '/accounts/' + handle + '/unfollowed.txt'
    if os.path.isfile(unfollowed_filename):
        # NOTE(review): open().read() leaves the file handle for the GC
        if handle_to_follow in open(unfollowed_filename).read():
            # remove them from the unfollowed file
            new_lines = ''
            with open(unfollowed_filename, 'r') as unfoll_file:
                lines = unfoll_file.readlines()
                for line in lines:
                    if handle_to_follow not in line:
                        new_lines += line
            with open(unfollowed_filename, 'w+') as unfoll_file:
                unfoll_file.write(new_lines)

    if not os.path.isdir(base_dir + '/accounts'):
        os.mkdir(base_dir + '/accounts')

    # from here on the full domain (including any port) is stored
    handle_to_follow = follow_nickname + '@' + follow_domain
    if group_account:
        handle_to_follow = '!' + handle_to_follow
    filename = base_dir + '/accounts/' + handle + '/' + follow_file
    if os.path.isfile(filename):
        # NOTE(review): open().read() leaves the file handle for the GC
        if handle_to_follow in open(filename).read():
            if debug:
                print('DEBUG: follow already exists')
            return True
        # prepend to follow file
        try:
            with open(filename, 'r+') as foll_file:
                content = foll_file.read()
                if handle_to_follow + '\n' not in content:
                    foll_file.seek(0, 0)
                    foll_file.write(handle_to_follow + '\n' + content)
                    print('DEBUG: follow added')
        except OSError as ex:
            print('WARN: Failed to write entry to follow file ' +
                  filename + ' ' + str(ex))
    else:
        # first follow
        if debug:
            print('DEBUG: ' + handle +
                  ' creating new following file to follow ' +
                  handle_to_follow +
                  ', filename is ' + filename)
        with open(filename, 'w+') as foll_file:
            foll_file.write(handle_to_follow + '\n')

    if follow_file.endswith('following.txt'):
        # Default to adding new follows to the calendar.
        # Possibly this could be made optional
        # if following a person add them to the list of
        # calendar follows
        print('DEBUG: adding ' +
              follow_nickname + '@' + follow_domain + ' to calendar of ' +
              nickname + '@' + domain)
        add_person_to_calendar(base_dir, nickname, domain,
                               follow_nickname, follow_domain)
        # add a default petname
        _set_default_pet_name(base_dir, nickname, domain,
                              follow_nickname, follow_domain)
    return True
2019-07-11 12:29:31 +00:00
2020-04-04 13:44:49 +00:00
2021-12-27 22:32:59 +00:00
def votes_on_newswire_item(status: []) -> int:
    """Returns the number of votes on a newswire item
    """
    return sum(1 for line in status if 'vote:' in line)
2020-10-08 19:47:23 +00:00
2021-12-27 22:38:48 +00:00
def locate_news_votes(base_dir: str, domain: str,
                      post_url: str) -> str:
    """Returns the votes filename for a news post
    within the news user account, or None if not found
    """
    post_url = \
        post_url.strip().replace('\n', '').replace('\r', '')
    # if this post in the shared inbox?
    post_url = remove_id_ending(post_url.strip()).replace('/', '#')

    # the votes file sits alongside the post json in the outbox
    if post_url.endswith('.json'):
        post_url += '.votes'
    else:
        post_url += '.json.votes'

    votes_filename = \
        base_dir + '/accounts/news@' + domain + '/outbox/' + post_url
    if os.path.isfile(votes_filename):
        return votes_filename
    return None
2021-12-27 22:46:10 +00:00
def locate_news_arrival(base_dir: str, domain: str,
                        post_url: str) -> datetime.datetime:
    """Returns the arrival time for a news post
    within the news user account, as a datetime object,
    or None if there is no arrival file for the post
    """
    post_url = \
        post_url.strip().replace('\n', '').replace('\r', '')
    # if this post in the shared inbox?
    post_url = remove_id_ending(post_url.strip()).replace('/', '#')

    # the arrival file sits alongside the post json in the outbox
    if post_url.endswith('.json'):
        post_url = post_url + '.arrived'
    else:
        post_url = post_url + '.json.arrived'

    account_dir = base_dir + '/accounts/news@' + domain + '/'
    post_filename = account_dir + 'outbox/' + post_url
    if os.path.isfile(post_filename):
        with open(post_filename, 'r') as arrival_file:
            arrival = arrival_file.read()
            if arrival:
                arrival_date = \
                    datetime.datetime.strptime(arrival,
                                               "%Y-%m-%dT%H:%M:%SZ")
                return arrival_date

    return None
2021-12-28 10:17:58 +00:00
def clear_from_post_caches(base_dir: str, recent_posts_cache: {},
                           post_id: str) -> None:
    """Clears cached html for the given post, so that edits
    to news will appear
    """
    cache_filename = '/postcache/' + post_id + '.html'
    accounts_dir = base_dir + '/accounts'
    for subdir, dirs, files in os.walk(accounts_dir):
        for acct in dirs:
            # only consider real account directories
            if '@' not in acct:
                continue
            if acct.startswith('inbox@'):
                continue
            cached_post_filename = \
                os.path.join(accounts_dir, acct) + cache_filename
            if os.path.isfile(cached_post_filename):
                try:
                    os.remove(cached_post_filename)
                except OSError:
                    print('EX: clear_from_post_caches file not removed ' +
                          str(cached_post_filename))
            # if the post is in the recent posts cache then remove it
            if recent_posts_cache.get('index'):
                if post_id in recent_posts_cache['index']:
                    recent_posts_cache['index'].remove(post_id)
            if recent_posts_cache.get('json'):
                if recent_posts_cache['json'].get(post_id):
                    del recent_posts_cache['json'][post_id]
            if recent_posts_cache.get('html'):
                if recent_posts_cache['html'].get(post_id):
                    del recent_posts_cache['html'][post_id]
        # only the top level of the accounts directory is scanned
        break
2021-12-26 20:36:08 +00:00
def locate_post(base_dir: str, nickname: str, domain: str,
                post_url: str, replies: bool = False) -> str:
    """Returns the filename for the given status post url,
    or None if it cannot be found
    """
    extension = 'replies' if replies else 'json'

    # if this post in the shared inbox?
    post_url = remove_id_ending(post_url.strip()).replace('/', '#')

    # add the extension
    post_url = post_url + '.' + extension

    # search boxes
    account_dir = acct_dir(base_dir, nickname, domain) + '/'
    for box_name in ('inbox', 'outbox', 'tlblogs'):
        box_filename = account_dir + box_name + '/' + post_url
        if os.path.isfile(box_filename):
            return box_filename

    # check news posts
    news_filename = \
        base_dir + '/accounts/news' + '@' + domain + '/outbox/' + post_url
    if os.path.isfile(news_filename):
        return news_filename

    # is it in the announce cache?
    announce_filename = \
        base_dir + '/cache/announce/' + nickname + '/' + post_url
    if os.path.isfile(announce_filename):
        return announce_filename

    # print('WARN: unable to locate ' + nickname + ' ' + post_url)
    return None
2019-07-14 16:37:01 +00:00
2020-04-04 13:44:49 +00:00
2021-12-28 10:25:50 +00:00
def _get_published_date(post_json_object: {}) -> str:
"""Returns the published date on the given post
"""
published = None
2021-12-25 22:09:19 +00:00
if post_json_object.get('published'):
published = post_json_object['published']
2021-12-26 10:57:03 +00:00
elif has_object_dict(post_json_object):
2021-12-25 22:09:19 +00:00
if post_json_object['object'].get('published'):
published = post_json_object['object']['published']
if not published:
return None
if not isinstance(published, str):
return None
return published
2021-12-28 10:25:50 +00:00
def get_reply_interval_hours(base_dir: str, nickname: str, domain: str,
                             default_reply_interval_hrs: int) -> int:
    """Returns the reply interval for the given account.
    The reply interval is the number of hours after a post being made
    during which replies are allowed
    """
    interval_filename = \
        acct_dir(base_dir, nickname, domain) + '/.reply_interval_hours'
    if os.path.isfile(interval_filename):
        with open(interval_filename, 'r') as interval_file:
            hours_str = interval_file.read()
        if hours_str.isdigit():
            return int(hours_str)
    # no custom interval stored, so use the default
    return default_reply_interval_hrs
2021-12-28 12:15:46 +00:00
def set_reply_interval_hours(base_dir: str, nickname: str, domain: str,
                             reply_interval_hours: int) -> bool:
    """Sets the reply interval for the given account.
    The reply interval is the number of hours after a post being made
    during which replies are allowed
    """
    interval_filename = \
        acct_dir(base_dir, nickname, domain) + '/.reply_interval_hours'
    try:
        with open(interval_filename, 'w+') as interval_file:
            interval_file.write(str(reply_interval_hours))
        return True
    except OSError:
        print('EX: set_reply_interval_hours unable to save reply interval ' +
              str(interval_filename) + ' ' +
              str(reply_interval_hours))
    return False
2021-12-28 12:15:46 +00:00
def can_reply_to(base_dir: str, nickname: str, domain: str,
                 post_url: str, reply_interval_hours: int,
                 curr_date_str: str = None,
                 post_json_object: {} = None) -> bool:
    """Is replying to the given post permitted?
    This is a spam mitigation feature, so that spammers can't
    add a lot of replies to old post which you don't notice.

    post_url: url of the post being replied to
    reply_interval_hours: hours after publication during which
        replies are permitted
    curr_date_str: optional current date in %Y-%m-%dT%H:%M:%SZ format,
        defaulting to utcnow
    post_json_object: optional already-loaded post, avoiding a disk read
    """
    # non-status urls (e.g. actor urls) can always be replied to
    if '/statuses/' not in post_url:
        return True
    if not post_json_object:
        post_filename = locate_post(base_dir, nickname, domain, post_url)
        if not post_filename:
            return False
        post_json_object = load_json(post_filename)
    if not post_json_object:
        return False
    published = _get_published_date(post_json_object)
    if not published:
        return False
    try:
        pub_date = datetime.datetime.strptime(published, '%Y-%m-%dT%H:%M:%SZ')
    except (ValueError, TypeError):
        # previously caught BaseException, which would also have
        # swallowed KeyboardInterrupt and SystemExit
        print('EX: can_reply_to unrecognized published date ' + str(published))
        return False
    if not curr_date_str:
        curr_date = datetime.datetime.utcnow()
    else:
        try:
            curr_date = \
                datetime.datetime.strptime(curr_date_str, '%Y-%m-%dT%H:%M:%SZ')
        except (ValueError, TypeError):
            print('EX: can_reply_to unrecognized current date ' +
                  str(curr_date_str))
            return False
    hours_since_publication = \
        int((curr_date - pub_date).total_seconds() / 3600)
    # also rejects posts apparently published in the future
    if hours_since_publication < 0 or \
       hours_since_publication >= reply_interval_hours:
        return False
    return True
2021-12-28 13:49:44 +00:00
def _remove_attachment(base_dir: str, http_prefix: str, domain: str,
post_json: {}):
2021-12-28 12:15:46 +00:00
if not post_json.get('attachment'):
2019-07-14 16:57:06 +00:00
return
2021-12-28 12:15:46 +00:00
if not post_json['attachment'][0].get('url'):
2019-07-14 16:57:06 +00:00
return
2021-12-28 12:15:46 +00:00
attachment_url = post_json['attachment'][0]['url']
if not attachment_url:
2019-07-14 16:57:06 +00:00
return
2021-12-28 12:15:46 +00:00
media_filename = base_dir + '/' + \
attachment_url.replace(http_prefix + '://' + domain + '/', '')
if os.path.isfile(media_filename):
try:
2021-12-28 12:15:46 +00:00
os.remove(media_filename)
2021-11-25 18:42:38 +00:00
except OSError:
2021-12-28 13:49:44 +00:00
print('EX: _remove_attachment unable to delete media file ' +
2021-12-28 12:15:46 +00:00
str(media_filename))
etag_filename = media_filename + '.etag'
if os.path.isfile(etag_filename):
try:
2021-12-28 12:15:46 +00:00
os.remove(etag_filename)
2021-11-25 18:42:38 +00:00
except OSError:
2021-12-28 13:49:44 +00:00
print('EX: _remove_attachment unable to delete etag file ' +
2021-12-28 12:15:46 +00:00
str(etag_filename))
post_json['attachment'] = []
2020-04-04 13:44:49 +00:00
2019-07-14 16:57:06 +00:00
2021-12-28 13:12:10 +00:00
def remove_moderation_post_from_index(base_dir: str, post_url: str,
                                      debug: bool) -> None:
    """Removes a url from the moderation index
    """
    moderation_index_file = base_dir + '/accounts/moderation.txt'
    if not os.path.isfile(moderation_index_file):
        return
    post_id = remove_id_ending(post_url)
    # read the whole index with a context manager so the file handle
    # is closed promptly (previously open().read() left it to the GC)
    with open(moderation_index_file, 'r') as file1:
        index_str = file1.read()
    if post_id not in index_str:
        return
    lines = index_str.splitlines(True)
    # rewrite the index without the given post id
    with open(moderation_index_file, 'w+') as file2:
        for line in lines:
            if line.strip("\n").strip("\r") != post_id:
                file2.write(line)
                continue
            if debug:
                print('DEBUG: removed ' + post_id +
                      ' from moderation index')
2019-08-12 18:02:29 +00:00
2021-12-26 19:36:40 +00:00
def _is_reply_to_blog_post(base_dir: str, nickname: str, domain: str,
                           post_json_object: str):
    """Is the given post a reply to a blog post?
    """
    if not has_object_dict(post_json_object):
        return False
    if not post_json_object['object'].get('inReplyTo'):
        return False
    if not isinstance(post_json_object['object']['inReplyTo'], str):
        return False
    blogs_index_filename = \
        acct_dir(base_dir, nickname, domain) + '/tlblogs.index'
    if not os.path.isfile(blogs_index_filename):
        return False
    post_id = remove_id_ending(post_json_object['object']['inReplyTo'])
    post_id = post_id.replace('/', '#')
    # read the index with a context manager so the file handle is
    # closed promptly (previously open().read() left it to the GC)
    with open(blogs_index_filename, 'r') as index_file:
        if post_id in index_file.read():
            return True
    return False
2021-12-28 14:55:45 +00:00
def _delete_post_remove_replies(base_dir: str, nickname: str, domain: str,
http_prefix: str, post_filename: str,
recent_posts_cache: {}, debug: bool) -> None:
2021-07-05 09:24:29 +00:00
"""Removes replies when deleting a post
"""
2021-12-28 12:15:46 +00:00
replies_filename = post_filename.replace('.json', '.replies')
if not os.path.isfile(replies_filename):
2021-07-05 09:24:29 +00:00
return
if debug:
2021-12-26 23:41:34 +00:00
print('DEBUG: removing replies to ' + post_filename)
2021-12-28 12:15:46 +00:00
with open(replies_filename, 'r') as replies_file:
for reply_id in replies_file:
reply_file = locate_post(base_dir, nickname, domain, reply_id)
if not reply_file:
2021-07-05 09:24:29 +00:00
continue
2021-12-28 12:15:46 +00:00
if os.path.isfile(reply_file):
2021-12-28 14:55:45 +00:00
delete_post(base_dir, http_prefix,
nickname, domain, reply_file, debug,
recent_posts_cache)
2021-07-05 09:24:29 +00:00
# remove the replies file
try:
2021-12-28 12:15:46 +00:00
os.remove(replies_filename)
2021-11-25 18:42:38 +00:00
except OSError:
2021-12-28 14:55:45 +00:00
print('EX: _delete_post_remove_replies ' +
'unable to delete replies file ' + str(replies_filename))
2021-07-05 09:24:29 +00:00
2021-12-28 13:12:10 +00:00
def _is_bookmarked(base_dir: str, nickname: str, domain: str,
                   post_filename: str) -> bool:
    """Returns True if the given post is bookmarked by the account.
    The bookmarks index stores one post filename per line.
    """
    bookmarks_index_filename = \
        acct_dir(base_dir, nickname, domain) + '/bookmarks.index'
    if not os.path.isfile(bookmarks_index_filename):
        return False
    bookmark_index = post_filename.split('/')[-1] + '\n'
    # use a context manager so the index file handle is always closed
    # (the previous bare open(...).read() leaked the file descriptor)
    with open(bookmarks_index_filename, 'r') as bookmarks_file:
        if bookmark_index in bookmarks_file.read():
            return True
    return False
2021-12-27 11:05:24 +00:00
def remove_post_from_cache(post_json_object: {},
                           recent_posts_cache: {}) -> None:
    """ if the post exists in the recent posts cache then remove it
    """
    if not recent_posts_cache:
        return
    if not post_json_object.get('id'):
        return
    if not recent_posts_cache.get('index'):
        return

    # normalize the post id into the form used as a cache key
    post_id = post_json_object['id'].split('#', 1)[0]
    post_id = remove_id_ending(post_id).replace('/', '#')
    if post_id not in recent_posts_cache['index']:
        return

    recent_posts_cache['index'].remove(post_id)

    # drop the cached json and html renderings of the post
    for section_name in ('json', 'html'):
        section = recent_posts_cache.get(section_name)
        if section and section.get(post_id):
            del section[post_id]
2021-07-05 09:45:55 +00:00
2021-12-28 14:55:45 +00:00
def _delete_cached_html(base_dir: str, nickname: str, domain: str,
                        post_json_object: {}):
    """Removes cached html file for the given post
    """
    cached_post_filename = \
        get_cached_post_filename(base_dir, nickname, domain, post_json_object)
    if not cached_post_filename:
        return
    if not os.path.isfile(cached_post_filename):
        return
    try:
        os.remove(cached_post_filename)
    except OSError:
        print('EX: _delete_cached_html ' +
              'unable to delete cached post file ' +
              str(cached_post_filename))
2021-07-05 09:45:55 +00:00
2021-12-28 14:55:45 +00:00
def _delete_hashtags_on_post(base_dir: str, post_json_object: {}) -> None:
    """Removes hashtags when a post is deleted

    For each Hashtag tag on the post, removes the post id from the
    corresponding per-tag index file under base_dir/tags, deleting the
    index file entirely if it becomes empty.
    """
    # only posts whose content contains a '#' can have hashtags
    remove_hashtag_index = False
    if has_object_dict(post_json_object):
        if post_json_object['object'].get('content'):
            if '#' in post_json_object['object']['content']:
                remove_hashtag_index = True

    if not remove_hashtag_index:
        return
    if not post_json_object['object'].get('id') or \
       not post_json_object['object'].get('tag'):
        return
    # get the id of the post
    post_id = remove_id_ending(post_json_object['object']['id'])
    for tag in post_json_object['object']['tag']:
        if not tag.get('type'):
            continue
        if tag['type'] != 'Hashtag':
            continue
        if not tag.get('name'):
            continue
        # find the index file for this tag
        # tag names begin with '#', which is stripped for the filename
        tag_index_filename = base_dir + '/tags/' + tag['name'][1:] + '.txt'
        if not os.path.isfile(tag_index_filename):
            continue
        # remove post_id from the tag index file
        lines = None
        with open(tag_index_filename, 'r') as index_file:
            lines = index_file.readlines()
        if not lines:
            continue
        newlines = ''
        for file_line in lines:
            if post_id in file_line:
                # skip over the deleted post
                continue
            newlines += file_line
        if not newlines.strip():
            # if there are no lines then remove the hashtag file
            try:
                os.remove(tag_index_filename)
            except OSError:
                print('EX: _delete_hashtags_on_post ' +
                      'unable to delete tag index ' + str(tag_index_filename))
        else:
            # write the new hashtag index without the given post in it
            with open(tag_index_filename, 'w+') as index_file:
                index_file.write(newlines)
2021-07-05 09:45:55 +00:00
2021-12-29 21:55:09 +00:00
def _delete_conversation_post(base_dir: str, nickname: str, domain: str,
                              post_json_object: {}) -> bool:
    """Deletes a post from a conversation index.

    Returns True if the conversation index was updated, False if there
    was nothing to remove. (The annotation previously said -> None even
    though False was returned on several paths.)
    """
    if not has_object_dict(post_json_object):
        return False
    if not post_json_object['object'].get('conversation'):
        return False
    if not post_json_object['object'].get('id'):
        return False
    conversation_dir = \
        acct_dir(base_dir, nickname, domain) + '/conversation'
    # conversation ids are urls, so make them filesystem-safe
    conversation_id = post_json_object['object']['conversation']
    conversation_id = conversation_id.replace('/', '#')
    post_id = post_json_object['object']['id']
    conversation_filename = conversation_dir + '/' + conversation_id
    if not os.path.isfile(conversation_filename):
        return False
    conversation_str = ''
    with open(conversation_filename, 'r') as conv_file:
        conversation_str = conv_file.read()
    if post_id + '\n' not in conversation_str:
        return False
    conversation_str = conversation_str.replace(post_id + '\n', '')
    if conversation_str:
        # other posts remain, so rewrite the conversation index
        with open(conversation_filename, 'w+') as conv_file:
            conv_file.write(conversation_str)
    else:
        # this was the last post, so remove the conversation entirely
        if os.path.isfile(conversation_filename + '.muted'):
            try:
                os.remove(conversation_filename + '.muted')
            except OSError:
                print('EX: _delete_conversation_post ' +
                      'unable to remove conversation ' +
                      str(conversation_filename) + '.muted')
        try:
            os.remove(conversation_filename)
        except OSError:
            print('EX: _delete_conversation_post ' +
                  'unable to remove conversation ' +
                  str(conversation_filename))
    return True
2021-08-12 10:22:04 +00:00
2021-12-28 14:55:45 +00:00
def delete_post(base_dir: str, http_prefix: str,
                nickname: str, domain: str, post_filename: str,
                debug: bool, recent_posts_cache: {}) -> None:
    """Recursively deletes a post and its replies and attachments

    Bookmarked posts and replies to blog posts are never deleted.
    Also removes the post from the in-memory cache, the conversation
    index, the moderation index, hashtag indexes, and deletes any
    sidecar files (.votes, .arrived, .muted, .tts, .reject) and the
    cached html rendering.
    """
    post_json_object = load_json(post_filename, 1)
    if not post_json_object:
        # the post could not be parsed, but still clean up its replies
        # and the file itself
        _delete_post_remove_replies(base_dir, nickname, domain,
                                    http_prefix, post_filename,
                                    recent_posts_cache, debug)
        # finally, remove the post itself
        try:
            os.remove(post_filename)
        except OSError:
            if debug:
                print('EX: delete_post unable to delete post ' +
                      str(post_filename))
        return

    # don't allow deletion of bookmarked posts
    if _is_bookmarked(base_dir, nickname, domain, post_filename):
        return

    # don't remove replies to blog posts
    if _is_reply_to_blog_post(base_dir, nickname, domain,
                              post_json_object):
        return

    # remove from recent posts cache in memory
    remove_post_from_cache(post_json_object, recent_posts_cache)

    # remove from conversation index
    _delete_conversation_post(base_dir, nickname, domain, post_json_object)

    # remove any attachment
    _remove_attachment(base_dir, http_prefix, domain, post_json_object)

    # remove sidecar files associated with this post
    extensions = ('votes', 'arrived', 'muted', 'tts', 'reject')
    for ext in extensions:
        ext_filename = post_filename + '.' + ext
        if os.path.isfile(ext_filename):
            try:
                os.remove(ext_filename)
            except OSError:
                print('EX: delete_post unable to remove ext ' +
                      str(ext_filename))

    # remove cached html version of the post
    _delete_cached_html(base_dir, nickname, domain, post_json_object)

    has_object = False
    if post_json_object.get('object'):
        has_object = True

    # remove from moderation index file
    if has_object:
        if has_object_dict(post_json_object):
            if post_json_object['object'].get('moderationStatus'):
                if post_json_object.get('id'):
                    post_id = remove_id_ending(post_json_object['id'])
                    remove_moderation_post_from_index(base_dir, post_id, debug)

    # remove any hashtags index entries
    if has_object:
        _delete_hashtags_on_post(base_dir, post_json_object)

    # remove any replies
    _delete_post_remove_replies(base_dir, nickname, domain,
                                http_prefix, post_filename,
                                recent_posts_cache, debug)
    # finally, remove the post itself
    try:
        os.remove(post_filename)
    except OSError:
        if debug:
            print('EX: delete_post unable to delete post ' +
                  str(post_filename))
2019-07-27 22:48:34 +00:00
2020-04-04 13:44:49 +00:00
2021-12-28 14:55:45 +00:00
def is_valid_language(text: str) -> bool:
    """Returns true if the given text contains a valid
    natural language string

    A string is considered valid if, for at least one known script,
    every non-digit character falls within that script's codepoint
    range.
    """
    # unicode codepoint ranges [start, end) for various scripts
    natural_languages = {
        "Latin": [65, 866],
        "Cyrillic": [1024, 1274],
        "Greek": [880, 1280],
        "isArmenian": [1328, 1424],
        "isHebrew": [1424, 1536],
        "Arabic": [1536, 1792],
        "Syriac": [1792, 1872],
        "Thaan": [1920, 1984],
        "Devanagari": [2304, 2432],
        "Bengali": [2432, 2560],
        "Gurmukhi": [2560, 2688],
        "Gujarati": [2688, 2816],
        "Oriya": [2816, 2944],
        "Tamil": [2944, 3072],
        "Telugu": [3072, 3200],
        "Kannada": [3200, 3328],
        "Malayalam": [3328, 3456],
        "Sinhala": [3456, 3584],
        "Thai": [3584, 3712],
        "Lao": [3712, 3840],
        "Tibetan": [3840, 4096],
        "Myanmar": [4096, 4256],
        "Georgian": [4256, 4352],
        "HangulJamo": [4352, 4608],
        "Cherokee": [5024, 5120],
        "UCAS": [5120, 5760],
        "Ogham": [5760, 5792],
        "Runic": [5792, 5888],
        "Khmer": [6016, 6144],
        "Mongolian": [6144, 6320]
    }
    for lang_range in natural_languages.values():
        low, high = lang_range
        if all(char.isdigit() or low <= ord(char) < high for char in text):
            return True
    return False
2021-12-28 14:55:45 +00:00
def _get_reserved_words() -> str:
return ('inbox', 'dm', 'outbox', 'following',
'public', 'followers', 'category',
2021-09-13 13:57:37 +00:00
'channel', 'calendar', 'video-channels',
'tlreplies', 'tlmedia', 'tlblogs',
'tlblogs', 'tlfeatures',
'moderation', 'moderationaction',
'activity', 'undo', 'pinned',
2021-09-02 13:02:07 +00:00
'actor', 'Actor',
'reply', 'replies', 'question', 'like',
'likes', 'users', 'statuses', 'tags',
'accounts', 'headers',
'channels', 'profile', 'u', 'c',
'updates', 'repeat', 'announce',
'shares', 'fonts', 'icons', 'avatars',
'welcome', 'helpimages',
'bookmark', 'bookmarks', 'tlbookmarks',
'ignores', 'linksmobile', 'newswiremobile',
'minimal', 'search', 'eventdelete',
2021-08-08 17:05:26 +00:00
'searchemoji', 'catalog', 'conversationId',
2021-09-12 16:04:45 +00:00
'mention', 'http', 'https',
'ontologies', 'data')
2021-12-28 14:55:45 +00:00
def get_nickname_validation_pattern() -> str:
    """Returns a html text input validation pattern for nickname
    """
    # build a chain of negative lookaheads, one per reserved word,
    # so that no reserved word can appear in the nickname
    pattern = ''
    for word in _get_reserved_words():
        anchor = '^' if not pattern else ''
        pattern += anchor + '(?!.*\\b' + word + '\\b)'
    # NOTE(review): the trailing '${1,30}' looks like it was intended
    # to be a length limit such as '.{1,30}$' - confirm before changing
    return pattern + '.*${1,30}'
2021-12-28 14:55:45 +00:00
def _is_reserved_name(nickname: str) -> bool:
    """Is the given nickname reserved for some special function?
    """
    return nickname in _get_reserved_words()
2021-12-28 14:41:10 +00:00
def valid_nickname(domain: str, nickname: str) -> bool:
    """Is the given nickname valid?

    A valid nickname is 1-30 characters from a single natural language
    script, contains no separator/punctuation characters, is not the
    domain name itself and is not a reserved word.
    """
    if not 0 < len(nickname) <= 30:
        return False
    if not is_valid_language(nickname):
        return False
    # characters which are not permitted within a nickname
    forbidden = ('.', ' ', '/', '?', ':', ';', '@', '#', '!')
    if any(char in nickname for char in forbidden):
        return False
    # this should only apply for the shared inbox
    if nickname == domain:
        return False
    return not _is_reserved_name(nickname)
2019-08-08 11:24:26 +00:00
2020-04-04 13:44:49 +00:00
2021-12-28 14:41:10 +00:00
def no_of_accounts(base_dir: str) -> int:
    """Returns the number of accounts on the system.

    Fixes: the return annotation previously said bool although an
    integer count is returned.
    """
    account_ctr = 0
    for _, dirs, _ in os.walk(base_dir + '/accounts'):
        for account in dirs:
            if is_account_dir(account):
                account_ctr += 1
        # only the top level of the accounts directory is relevant
        break
    return account_ctr
2019-08-10 11:31:42 +00:00
2020-04-04 13:44:49 +00:00
2021-12-28 14:41:10 +00:00
def no_of_active_accounts_monthly(base_dir: str, months: int) -> int:
    """Returns the number of accounts on the system which have been
    used within the last given number of months.

    Activity is determined from the .lastUsed file in each account
    directory, which holds a unix timestamp.
    Fixes: the return annotation previously said bool although an
    integer count is returned.
    """
    account_ctr = 0
    curr_time = int(time.time())
    month_seconds = int(60 * 60 * 24 * 30 * months)
    for _, dirs, _ in os.walk(base_dir + '/accounts'):
        for account in dirs:
            if not is_account_dir(account):
                continue
            last_used_filename = \
                base_dir + '/accounts/' + account + '/.lastUsed'
            if not os.path.isfile(last_used_filename):
                continue
            with open(last_used_filename, 'r') as last_used_file:
                last_used = last_used_file.read()
                if last_used.isdigit():
                    time_diff = curr_time - int(last_used)
                    if time_diff < month_seconds:
                        account_ctr += 1
        # only the top level of the accounts directory is relevant
        break
    return account_ctr
2019-11-13 15:15:08 +00:00
2020-04-04 13:44:49 +00:00
2021-12-28 14:41:10 +00:00
def is_public_post_from_url(base_dir: str, nickname: str, domain: str,
                            post_url: str) -> bool:
    """Returns whether the given url is a public post
    """
    post_filename = locate_post(base_dir, nickname, domain, post_url)
    if post_filename:
        post_json_object = load_json(post_filename, 1)
        if post_json_object:
            return is_public_post(post_json_object)
    return False
2020-04-04 13:44:49 +00:00
2021-12-28 14:41:10 +00:00
def is_public_post(post_json_object: {}) -> bool:
    """Returns true if the given post is public

    A public Create activity has at least one 'to' recipient ending
    with '#Public'.
    """
    if post_json_object.get('type') != 'Create':
        return False
    if not has_object_dict(post_json_object):
        return False
    recipients = post_json_object['object'].get('to')
    if not recipients:
        return False
    return any(recipient.endswith('#Public') for recipient in recipients)
2019-09-29 18:48:34 +00:00
2020-04-04 13:44:49 +00:00
2021-06-20 11:28:35 +00:00
def copytree(src: str, dst: str, symlinks: str = False, ignore: bool = None):
    """Copy a directory

    Each subdirectory is copied with shutil.copytree and each plain
    file with shutil.copy2, preserving metadata.
    """
    for entry in os.listdir(src):
        src_path = os.path.join(src, entry)
        dst_path = os.path.join(dst, entry)
        if os.path.isdir(src_path):
            shutil.copytree(src_path, dst_path, symlinks, ignore)
        else:
            shutil.copy2(src_path, dst_path)
2019-10-19 17:50:05 +00:00
2020-04-04 13:44:49 +00:00
2021-12-26 23:53:16 +00:00
def get_cached_post_directory(base_dir: str,
                              nickname: str, domain: str) -> str:
    """Returns the directory where the html post cache exists
    """
    return acct_dir(base_dir, nickname, domain) + '/postcache'
2019-10-19 17:50:05 +00:00
2020-04-04 13:44:49 +00:00
2021-12-26 23:41:34 +00:00
def get_cached_post_filename(base_dir: str, nickname: str, domain: str,
                             post_json_object: {}) -> str:
    """Returns the html cache filename for the given post,
    or None if the cache directory is missing or malformed
    """
    cached_post_dir = get_cached_post_directory(base_dir, nickname, domain)
    # the cache directory must exist and be within an account directory
    if not os.path.isdir(cached_post_dir) or '@' not in cached_post_dir:
        return None
    cached_post_id = remove_id_ending(post_json_object['id'])
    return cached_post_dir + '/' + cached_post_id.replace('/', '#') + '.html'
2019-11-24 13:46:28 +00:00
2020-04-04 13:44:49 +00:00
2021-12-28 14:24:14 +00:00
def update_recent_posts_cache(recent_posts_cache: {}, max_recent_posts: int,
                              post_json_object: {}, html_str: str) -> None:
    """Store recent posts in memory so that they can be quickly recalled
    """
    if not post_json_object.get('id'):
        return
    # normalize the post id into the form used as a cache key
    post_id = post_json_object['id'].split('#', 1)[0]
    post_id = remove_id_ending(post_id).replace('/', '#')
    if not recent_posts_cache.get('index'):
        # start a fresh cache containing only this post
        recent_posts_cache['index'] = [post_id]
        recent_posts_cache['json'] = {}
        recent_posts_cache['html'] = {}
        recent_posts_cache['json'][post_id] = json.dumps(post_json_object)
        recent_posts_cache['html'][post_id] = html_str
        return
    if post_id in recent_posts_cache['index']:
        return
    recent_posts_cache['index'].append(post_id)
    post_json_object['muted'] = False
    recent_posts_cache['json'][post_id] = json.dumps(post_json_object)
    recent_posts_cache['html'][post_id] = html_str
    # evict the oldest entries when the cache is full
    while len(recent_posts_cache['html']) > max_recent_posts:
        oldest_id = recent_posts_cache['index'].pop(0)
        if recent_posts_cache['json'].get(oldest_id):
            del recent_posts_cache['json'][oldest_id]
        if recent_posts_cache['html'].get(oldest_id):
            del recent_posts_cache['html'][oldest_id]
2020-04-04 13:44:49 +00:00
2020-02-21 10:19:02 +00:00
2021-12-28 14:01:37 +00:00
def file_last_modified(filename: str) -> str:
    """Returns the date when a file was last modified,
    formatted as an ISO-8601 style timestamp
    """
    modified = datetime.datetime.fromtimestamp(os.path.getmtime(filename))
    return modified.strftime("%Y-%m-%dT%H:%M:%SZ")
2020-02-22 16:00:27 +00:00
2020-04-04 13:44:49 +00:00
2021-12-28 13:56:43 +00:00
def get_css(base_dir: str, css_filename: str, css_cache: {}) -> str:
    """Retrieves the css for a given file, or from a cache.

    The cache maps filename -> [last modified time, css text], so the
    file is only re-read when its modification time changes.
    Returns None if the css file does not exist.
    Fixes: removed an unreachable trailing 'return None' after the
    with-block, and unified the cache update into a single assignment.
    """
    # does the css file exist?
    if not os.path.isfile(css_filename):
        return None
    last_modified = file_last_modified(css_filename)
    # has this already been loaded into the cache?
    if css_cache.get(css_filename):
        if css_cache[css_filename][0] == last_modified:
            # file hasn't changed, so return the version in the cache
            return css_cache[css_filename][1]
    with open(css_filename, 'r') as fp_css:
        css = fp_css.read()
    # store (or refresh) the cache entry
    css_cache[css_filename] = [last_modified, css]
    return css
2021-12-28 13:49:44 +00:00
def is_blog_post(post_json_object: {}) -> bool:
    """Is the given post a blog post?

    A blog post is a Create activity whose object is an Article with
    some content.
    Fixes: previously post_json_object['type'] raised KeyError when the
    'type' field was absent; .get is used instead.
    """
    if post_json_object.get('type') != 'Create':
        return False
    if not has_object_dict(post_json_object):
        return False
    if not has_object_stringType(post_json_object, False):
        return False
    if not post_json_object['object'].get('content'):
        return False
    return post_json_object['object']['type'] == 'Article'
2020-04-11 10:19:35 +00:00
2021-12-28 12:20:18 +00:00
def is_news_post(post_json_object: {}) -> bool:
    """Is the given post a news post?

    Returns the truthy 'news' field value when this is a news post,
    otherwise None. (Docstring previously said "blog post" due to a
    copy-paste error.)
    """
    return post_json_object.get('news')
2021-12-28 13:07:02 +00:00
def _search_virtual_box_posts(base_dir: str, nickname: str, domain: str,
                              search_str: str, max_results: int,
                              box_name: str) -> []:
    """Searches through a virtual box, which is typically an index on the inbox

    Returns a list of post filenames whose content contains all of the
    '+'-separated search words.
    """
    index_filename = \
        acct_dir(base_dir, nickname, domain) + '/' + box_name + '.index'
    # bookmarked posts are stored within the inbox directory
    if box_name == 'bookmarks':
        box_name = 'inbox'
    path = acct_dir(base_dir, nickname, domain) + '/' + box_name
    if not os.path.isdir(path):
        return []

    search_str = search_str.lower().strip()
    if '+' in search_str:
        search_words = [wrd.strip() for wrd in search_str.split('+')]
        print('SEARCH: ' + str(search_words))
    else:
        search_words = [search_str]

    res = []
    with open(index_filename, 'r') as index_file:
        for index_line in index_file:
            # the index only contains json post filenames; anything
            # else marks the end
            if '.json' not in index_line:
                break
            post_filename = path + '/' + index_line.strip()
            if not os.path.isfile(post_filename):
                continue
            with open(post_filename, 'r') as post_file:
                data = post_file.read().lower()
            if all(keyword in data for keyword in search_words):
                res.append(post_filename)
                if len(res) >= max_results:
                    break
    return res
2021-12-28 13:07:02 +00:00
def search_box_posts(base_dir: str, nickname: str, domain: str,
                     search_str: str, max_results: int,
                     box_name='outbox') -> []:
    """Search your posts and return a list of the filenames
    containing matching strings
    """
    path = acct_dir(base_dir, nickname, domain) + '/' + box_name
    # is this a virtual box, such as direct messages?
    if not os.path.isdir(path):
        if os.path.isfile(path + '.index'):
            return _search_virtual_box_posts(base_dir, nickname, domain,
                                             search_str, max_results,
                                             box_name)
        return []

    search_str = search_str.lower().strip()
    if '+' in search_str:
        search_words = [wrd.strip() for wrd in search_str.split('+')]
        print('SEARCH: ' + str(search_words))
    else:
        search_words = [search_str]

    res = []
    for root, _, fnames in os.walk(path):
        for fname in fnames:
            file_path = os.path.join(root, fname)
            with open(file_path, 'r') as post_file:
                data = post_file.read().lower()
            # every search word must appear within the post
            if all(keyword in data for keyword in search_words):
                res.append(file_path)
                if len(res) >= max_results:
                    return res
        # only search the top level of the box directory
        break
    return res
2020-05-04 18:24:30 +00:00
2021-12-29 21:55:09 +00:00
def get_file_case_insensitive(path: str) -> str:
    """Returns a case specific filename given a case insensitive version of it

    Tries the path as given, then fully lowercased; returns None when
    neither exists.
    """
    if os.path.isfile(path):
        return path
    lower_path = path.lower()
    if lower_path != path and os.path.isfile(lower_path):
        return lower_path
    return None
2020-06-06 18:16:16 +00:00
2021-12-27 23:23:07 +00:00
def undo_likes_collection_entry(recent_posts_cache: {},
                                base_dir: str, post_filename: str,
                                object_url: str,
                                actor: str, domain: str, debug: bool,
                                post_json_object: {}) -> None:
    """Undoes a like for a particular actor

    Removes the actor's entry from the post's 'likes' collection,
    deletes the whole collection if it was the only like, then saves
    the post back to post_filename. Cached renderings of the post are
    invalidated so the like icon gets redrawn.
    NOTE(review): object_url is currently unused by this function.
    """
    if not post_json_object:
        post_json_object = load_json(post_filename)
        if not post_json_object:
            return
    # remove any cached version of this post so that the
    # like icon is changed
    nickname = get_nickname_from_actor(actor)
    cached_post_filename = \
        get_cached_post_filename(base_dir, nickname,
                                 domain, post_json_object)
    if cached_post_filename:
        if os.path.isfile(cached_post_filename):
            try:
                os.remove(cached_post_filename)
            except OSError:
                print('EX: undo_likes_collection_entry ' +
                      'unable to delete cached post ' +
                      str(cached_post_filename))
    remove_post_from_cache(post_json_object, recent_posts_cache)

    if not post_json_object.get('type'):
        return
    if post_json_object['type'] != 'Create':
        return
    # the likes collection lives on the object when there is one,
    # otherwise on the top level activity
    obj = post_json_object
    if has_object_dict(post_json_object):
        obj = post_json_object['object']
    if not obj.get('likes'):
        return
    if not isinstance(obj['likes'], dict):
        return
    if not obj['likes'].get('items'):
        return
    total_items = 0
    if obj['likes'].get('totalItems'):
        total_items = obj['likes']['totalItems']
    item_found = False
    for like_item in obj['likes']['items']:
        if like_item.get('actor'):
            if like_item['actor'] == actor:
                if debug:
                    print('DEBUG: like was removed for ' + actor)
                obj['likes']['items'].remove(like_item)
                item_found = True
                break
    if not item_found:
        return
    if total_items == 1:
        # this was the only like, so drop the whole collection
        if debug:
            print('DEBUG: likes was removed from post')
        del obj['likes']
    else:
        itlen = len(obj['likes']['items'])
        obj['likes']['totalItems'] = itlen

    save_json(post_json_object, post_filename)
2020-06-06 18:16:16 +00:00
2021-12-27 23:02:50 +00:00
def undo_reaction_collection_entry(recent_posts_cache: {},
                                   base_dir: str, post_filename: str,
                                   object_url: str,
                                   actor: str, domain: str, debug: bool,
                                   post_json_object: {},
                                   emoji_content: str) -> None:
    """Undoes an emoji reaction for a particular actor

    recent_posts_cache -- in-memory cache of recent posts, also purged
    base_dir -- base directory of the instance
    post_filename -- path of the post json to modify
    object_url -- url of the reacted-to object (unused here, kept for
                  interface compatibility with callers)
    actor -- actor whose reaction is being undone
    domain -- domain of the account which stores the post
    debug -- if True print diagnostic messages
    post_json_object -- the post, or None to load it from post_filename
    emoji_content -- the emoji character of the reaction to remove
    """
    if not post_json_object:
        post_json_object = load_json(post_filename)
    if not post_json_object:
        return
    # remove any cached version of this post so that the
    # like icon is changed
    nickname = get_nickname_from_actor(actor)
    cached_post_filename = \
        get_cached_post_filename(base_dir, nickname,
                                 domain, post_json_object)
    if cached_post_filename:
        if os.path.isfile(cached_post_filename):
            try:
                os.remove(cached_post_filename)
            except OSError:
                print('EX: undo_reaction_collection_entry ' +
                      'unable to delete cached post ' +
                      str(cached_post_filename))
    remove_post_from_cache(post_json_object, recent_posts_cache)

    # reactions live on the object within a Create wrapper
    if not post_json_object.get('type'):
        return
    if post_json_object['type'] != 'Create':
        return
    obj = post_json_object
    if has_object_dict(post_json_object):
        obj = post_json_object['object']
    if not obj.get('reactions'):
        return
    if not isinstance(obj['reactions'], dict):
        return
    if not obj['reactions'].get('items'):
        return
    # previous number of reactions, used to decide whether the
    # whole collection can be removed
    total_items = 0
    if obj['reactions'].get('totalItems'):
        total_items = obj['reactions']['totalItems']
    item_found = False
    for like_item in obj['reactions']['items']:
        if like_item.get('actor'):
            # use .get('content') so that a reaction item without a
            # content field does not raise KeyError
            if like_item['actor'] == actor and \
               like_item.get('content') == emoji_content:
                if debug:
                    print('DEBUG: emoji reaction was removed for ' + actor)
                obj['reactions']['items'].remove(like_item)
                item_found = True
                break
    if not item_found:
        return
    if total_items == 1:
        # this was the only reaction, so remove the collection
        if debug:
            print('DEBUG: emoji reaction was removed from post')
        del obj['reactions']
    else:
        # otherwise just refresh the count
        itlen = len(obj['reactions']['items'])
        obj['reactions']['totalItems'] = itlen
    save_json(post_json_object, post_filename)
2021-11-10 12:16:03 +00:00
2021-12-27 10:55:48 +00:00
def undo_announce_collection_entry(recent_posts_cache: {},
                                   base_dir: str, post_filename: str,
                                   actor: str, domain: str,
                                   debug: bool) -> None:
    """Undoes an announce for a particular actor by removing it from
    the "shares" collection within a post. Note that the "shares"
    collection has no relation to shared items in shares.py. It's
    shares of posts, not shares of physical objects.

    recent_posts_cache -- in-memory cache of recent posts, also purged
    base_dir -- base directory of the instance
    post_filename -- path of the post json to modify
    actor -- actor whose announce is being undone
    domain -- domain of the account which stores the post
    debug -- if True print diagnostic messages
    """
    post_json_object = load_json(post_filename)
    if not post_json_object:
        return
    # remove any cached version of this announce so that the announce
    # icon is changed
    nickname = get_nickname_from_actor(actor)
    cached_post_filename = \
        get_cached_post_filename(base_dir, nickname, domain,
                                 post_json_object)
    if cached_post_filename:
        if os.path.isfile(cached_post_filename):
            try:
                os.remove(cached_post_filename)
            except OSError:
                # best effort: a missing cache file is not fatal
                if debug:
                    print('EX: undo_announce_collection_entry ' +
                          'unable to delete cached post ' +
                          str(cached_post_filename))
    remove_post_from_cache(post_json_object, recent_posts_cache)

    # only Create activities with an object dict can carry a
    # shares collection
    if not post_json_object.get('type'):
        return
    if post_json_object['type'] != 'Create':
        return
    if not has_object_dict(post_json_object):
        if debug:
            pprint(post_json_object)
            print('DEBUG: post has no object')
        return
    if not post_json_object['object'].get('shares'):
        return
    if not post_json_object['object']['shares'].get('items'):
        return
    # previous number of announces, used below to decide whether the
    # whole collection should be deleted
    total_items = 0
    if post_json_object['object']['shares'].get('totalItems'):
        total_items = post_json_object['object']['shares']['totalItems']
    # remove the first announce by this actor
    item_found = False
    for announce_item in post_json_object['object']['shares']['items']:
        if announce_item.get('actor'):
            if announce_item['actor'] == actor:
                if debug:
                    print('DEBUG: Announce was removed for ' + actor)
                # alias keeps the remove() line within the line limit
                an_it = announce_item
                post_json_object['object']['shares']['items'].remove(an_it)
                item_found = True
                break
    if not item_found:
        return
    if total_items == 1:
        # this was the only announce, so remove the collection entirely
        if debug:
            print('DEBUG: shares (announcements) ' +
                  'was removed from post')
        del post_json_object['object']['shares']
    else:
        # otherwise just refresh the count
        itlen = len(post_json_object['object']['shares']['items'])
        post_json_object['object']['shares']['totalItems'] = itlen
    save_json(post_json_object, post_filename)
2021-12-26 23:41:34 +00:00
def update_announce_collection(recent_posts_cache: {},
                               base_dir: str, post_filename: str,
                               actor: str, nickname: str, domain: str,
                               debug: bool) -> None:
    """Updates the announcements collection within a post
    Confusingly this is known as "shares", but isn't the
    same as shared items within shares.py
    It's shares of posts, not shares of physical objects.

    recent_posts_cache -- in-memory cache of recent posts, also purged
    base_dir -- base directory of the instance
    post_filename -- path of the post json to modify
    actor -- actor performing the announce
    nickname -- nickname of the account which stores the post
    domain -- domain of the account which stores the post
    debug -- if True print diagnostic messages
    """
    post_json_object = load_json(post_filename)
    if not post_json_object:
        return
    # remove any cached version of this announce so that the announce
    # icon is changed
    cached_post_filename = \
        get_cached_post_filename(base_dir, nickname, domain,
                                 post_json_object)
    if cached_post_filename:
        if os.path.isfile(cached_post_filename):
            try:
                os.remove(cached_post_filename)
            except OSError:
                # best effort: a missing cache file is not fatal
                if debug:
                    print('EX: update_announce_collection ' +
                          'unable to delete cached post ' +
                          str(cached_post_filename))
    remove_post_from_cache(post_json_object, recent_posts_cache)

    if not has_object_dict(post_json_object):
        if debug:
            pprint(post_json_object)
            print('DEBUG: post ' + post_filename + ' has no object')
        return
    # id of the shares collection, derived from the post id
    # NOTE(review): assumes the post always has an 'id' field — a post
    # without one would raise KeyError here; confirm against callers
    post_url = remove_id_ending(post_json_object['id']) + '/shares'
    if not post_json_object['object'].get('shares'):
        # no shares collection yet, so create one containing this actor
        if debug:
            print('DEBUG: Adding initial shares (announcements) to ' +
                  post_url)
        announcements_json = {
            "@context": "https://www.w3.org/ns/activitystreams",
            'id': post_url,
            'type': 'Collection',
            "totalItems": 1,
            'items': [{
                'type': 'Announce',
                'actor': actor
            }]
        }
        post_json_object['object']['shares'] = announcements_json
    else:
        if post_json_object['object']['shares'].get('items'):
            shares_items = post_json_object['object']['shares']['items']
            for announce_item in shares_items:
                if announce_item.get('actor'):
                    # already announced by this actor, nothing to do
                    if announce_item['actor'] == actor:
                        return
            new_announce = {
                'type': 'Announce',
                'actor': actor
            }
            post_json_object['object']['shares']['items'].append(new_announce)
            itlen = len(post_json_object['object']['shares']['items'])
            post_json_object['object']['shares']['totalItems'] = itlen
        else:
            if debug:
                print('DEBUG: shares (announcements) section of post ' +
                      'has no items list')
    if debug:
        print('DEBUG: saving post with shares (announcements) added')
        pprint(post_json_object)
    save_json(post_json_object, post_filename)
2020-06-22 16:55:19 +00:00
2021-12-26 20:52:11 +00:00
def week_day_of_month_start(month_number: int, year: int) -> int:
    """Returns the weekday number for the first day of the given month
    1=sun, 7=sat
    """
    month_start = datetime.datetime(year, month_number, 1, 0, 0)
    # %w yields 0=sunday..6=saturday, so shift into the 1..7 convention
    return 1 + int(month_start.strftime("%w"))
2020-11-13 13:34:14 +00:00
2021-12-26 20:48:15 +00:00
def media_file_mime_type(filename: str) -> str:
    """Given a media filename return its mime type
    Unknown extensions fall back to image/png
    """
    if '.' not in filename:
        return 'image/png'
    extensions = {
        'json': 'application/json',
        'png': 'image/png',
        'jpg': 'image/jpeg',
        'jpeg': 'image/jpeg',
        'gif': 'image/gif',
        'svg': 'image/svg+xml',
        'webp': 'image/webp',
        'avif': 'image/avif',
        'ico': 'image/x-icon',
        'mp3': 'audio/mpeg',
        'ogg': 'audio/ogg',
        'flac': 'audio/flac',
        'mp4': 'video/mp4',
        'ogv': 'video/ogv'
    }
    # only the final extension matters
    file_ext = filename.rsplit('.', 1)[-1]
    return extensions.get(file_ext, 'image/png')
2021-12-26 20:48:15 +00:00
def is_recent_post(post_json_object: {}, max_days: int) -> bool:
    """ Is the given post recent?
    Returns True if the post was published within max_days of now
    """
    if not has_object_dict(post_json_object):
        return False
    published = post_json_object['object'].get('published')
    if not published:
        return False
    if not isinstance(published, str):
        return False

    epoch = datetime.datetime(1970, 1, 1)
    now_days = (datetime.datetime.utcnow() - epoch).days
    # oldest day number still considered recent
    recently = now_days - max_days

    try:
        published_date = \
            datetime.datetime.strptime(published,
                                       "%Y-%m-%dT%H:%M:%SZ")
    except BaseException:
        print('EX: is_recent_post unrecognized published date ' +
              str(published))
        return False

    published_days = (published_date - epoch).days
    return published_days >= recently
2021-12-26 20:39:35 +00:00
def camel_case_split(text: str) -> str:
    """ Splits CamelCase into "Camel Case"
    """
    # boundaries: lower→Upper, or the end of an UPPER run before a word
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|' +
                          '(?<=[A-Z])(?=[A-Z][a-z])|$)', text)
    if not matches:
        return text
    return ' '.join(match.group(0) for match in matches).strip()
2021-03-05 19:00:37 +00:00
2021-12-29 10:39:46 +00:00
def convert_to_snake_case(text: str) -> str:
    """Convert camel case to snake case
    """
    spaced = camel_case_split(text)
    return spaced.lower().replace(' ', '_')
def _convert_to_camel_case(text: str) -> str:
"""Convers a snake case string to camel case
"""
if '_' not in text:
return text
words = text.split('_')
result = ''
ctr = 0
for wrd in words:
if ctr > 0:
result += wrd.title()
else:
result = wrd.lower()
ctr += 1
return result
2021-12-26 20:20:36 +00:00
def reject_post_id(base_dir: str, nickname: str, domain: str,
                   post_id: str, recent_posts_cache: {}) -> None:
    """ Marks the given post as rejected,
    for example an announce which is too old
    """
    post_filename = locate_post(base_dir, nickname, domain, post_id)
    if not post_filename:
        return

    if recent_posts_cache.get('index'):
        # the cache index entry is the basename of the post file
        # without any extension
        index_filename = os.path.basename(post_filename)
        post_url = \
            index_filename.replace('\n', '').replace('\r', '')
        post_url = post_url.replace('.json', '').strip()

        # evict any cached copies of this post
        if post_url in recent_posts_cache['index']:
            if recent_posts_cache['json'].get(post_url):
                del recent_posts_cache['json'][post_url]
            if recent_posts_cache['html'].get(post_url):
                del recent_posts_cache['html'][post_url]

    # the presence of this file marks the post as rejected
    with open(post_filename + '.reject', 'w+') as reject_file:
        reject_file.write('\n')
2021-12-26 20:12:18 +00:00
def is_dm(post_json_object: {}) -> bool:
    """Returns true if the given post is a DM
    """
    if post_json_object['type'] != 'Create':
        return False
    if not has_object_dict(post_json_object):
        return False
    dm_object_types = (
        'Note', 'Page', 'Patch', 'EncryptedMessage', 'Article'
    )
    if post_json_object['object']['type'] not in dm_object_types:
        return False
    if post_json_object['object'].get('moderationStatus'):
        return False
    # a DM must not address the public or a followers collection
    for field_name in ('to', 'cc'):
        if not post_json_object['object'].get(field_name):
            continue
        for to_address in post_json_object['object'][field_name]:
            if to_address.endswith('#Public'):
                return False
            if to_address.endswith('followers'):
                return False
    return True
2021-12-26 19:36:40 +00:00
def is_reply(post_json_object: {}, actor: str) -> bool:
    """Returns true if the given post is a reply to the given actor
    """
    if post_json_object['type'] != 'Create':
        return False
    if not has_object_dict(post_json_object):
        return False
    post_obj = post_json_object['object']
    if post_obj.get('moderationStatus'):
        return False
    reply_object_types = ('Note', 'Page', 'EncryptedMessage', 'Article')
    if post_obj['type'] not in reply_object_types:
        return False
    # direct reply: inReplyTo points at one of the actor's posts
    if post_obj.get('inReplyTo'):
        if isinstance(post_obj['inReplyTo'], str):
            if post_obj['inReplyTo'].startswith(actor):
                return True
    # otherwise look for a mention of the actor
    if not post_obj.get('tag'):
        return False
    if not isinstance(post_obj['tag'], list):
        return False
    for tag in post_obj['tag']:
        if not tag.get('type'):
            continue
        if tag['type'] == 'Mention' and tag.get('href'):
            if actor in tag['href']:
                return True
    return False
2021-03-12 12:04:34 +00:00
2021-12-26 19:15:36 +00:00
def contains_pgp_public_key(content: str) -> bool:
    """Returns true if the given content contains a PGP public key
    """
    # both armor delimiters must be present
    return '--BEGIN PGP PUBLIC KEY BLOCK--' in content and \
        '--END PGP PUBLIC KEY BLOCK--' in content
2021-12-26 19:15:36 +00:00
def is_pgp_encrypted(content: str) -> bool:
    """Returns true if the given content is PGP encrypted
    """
    # both armor delimiters must be present
    return '--BEGIN PGP MESSAGE--' in content and \
        '--END PGP MESSAGE--' in content
2021-03-18 17:27:46 +00:00
2021-12-26 19:15:36 +00:00
def invalid_ciphertext(content: str) -> bool:
    """Returns true if the given content contains an invalid key
    """
    has_armor_delimiter = \
        '----BEGIN ' in content or '----END ' in content
    if has_armor_delimiter:
        # armored but neither a public key nor an encrypted message
        if not contains_pgp_public_key(content) and \
           not is_pgp_encrypted(content):
            return True
    return False
2021-12-26 19:12:02 +00:00
def load_translations_from_file(base_dir: str, language: str) -> ({}, str):
    """Returns the translations dictionary
    together with the language code which was actually used
    """
    if not os.path.isdir(base_dir + '/translations'):
        print('ERROR: translations directory not found')
        return None, None
    if language:
        system_language = language
    else:
        # fall back to the system locale
        system_language = locale.getdefaultlocale()[0]
    if not system_language:
        system_language = 'en'
    # reduce eg. "en_GB.UTF-8" to a bare language code
    if '_' in system_language:
        system_language = system_language.split('_')[0]
    while '/' in system_language:
        system_language = system_language.split('/')[1]
    if '.' in system_language:
        system_language = system_language.split('.')[0]
    translations_file = base_dir + '/translations/' + \
        system_language + '.json'
    if not os.path.isfile(translations_file):
        # unsupported language, fall back to English
        system_language = 'en'
        translations_file = base_dir + '/translations/' + \
            system_language + '.json'
    return load_json(translations_file), system_language
2021-04-22 09:27:20 +00:00
2021-12-26 19:09:04 +00:00
def dm_allowed_from_domain(base_dir: str,
                           nickname: str, domain: str,
                           sending_actor_domain: str) -> bool:
    """When a DM is received and the .followDMs flag file exists
    Then optionally some domains can be specified as allowed,
    regardless of individual follows.
    i.e. Mostly you only want DMs from followers, but there are
    a few particular instances that you trust

    Returns True if sending_actor_domain is listed in the account's
    dmAllowedInstances.txt file
    """
    dm_allowed_instances_file = \
        acct_dir(base_dir, nickname, domain) + '/dmAllowedInstances.txt'
    if not os.path.isfile(dm_allowed_instances_file):
        return False
    # use a context manager so the file handle is not leaked
    with open(dm_allowed_instances_file, 'r') as fp_allowed:
        if sending_actor_domain + '\n' in fp_allowed.read():
            return True
    return False
2021-05-16 15:10:39 +00:00
2021-12-26 19:01:36 +00:00
def get_occupation_skills(actor_json: {}) -> []:
    """Returns the list of skills for an actor
    """
    if 'hasOccupation' not in actor_json:
        return []
    occupations = actor_json['hasOccupation']
    if not isinstance(occupations, list):
        return []
    for occupation_item in occupations:
        if not isinstance(occupation_item, dict):
            continue
        if occupation_item.get('@type') != 'Occupation':
            continue
        skills = occupation_item.get('skills')
        if not skills:
            continue
        # skills may be either a list or a single string
        if isinstance(skills, list):
            return skills
        if isinstance(skills, str):
            return [skills]
        break
    return []
2021-12-26 18:58:06 +00:00
def get_occupation_name(actor_json: {}) -> str:
    """Returns the occupation name an actor
    """
    if not actor_json.get('hasOccupation'):
        return ""
    if not isinstance(actor_json['hasOccupation'], list):
        return ""
    for occupation_item in actor_json['hasOccupation']:
        if not isinstance(occupation_item, dict):
            continue
        if occupation_item.get('@type') != 'Occupation':
            continue
        occupation_name = occupation_item.get('name')
        if not occupation_name:
            continue
        if isinstance(occupation_name, str):
            return occupation_name
        break
    return ""
2021-12-26 18:55:07 +00:00
def set_occupation_name(actor_json: {}, name: str) -> bool:
    """Sets the occupation name of an actor
    Returns True if a name was set
    """
    if not actor_json.get('hasOccupation'):
        return False
    if not isinstance(actor_json['hasOccupation'], list):
        return False
    # set the name on the first Occupation entry found
    for occupation_item in actor_json['hasOccupation']:
        if not isinstance(occupation_item, dict):
            continue
        if occupation_item.get('@type') != 'Occupation':
            continue
        occupation_item['name'] = name
        return True
    return False
2021-12-26 18:50:59 +00:00
def set_occupation_skills_list(actor_json: {}, skills_list: []) -> bool:
    """Sets the occupation skills for an actor
    Returns True if the skills were set
    """
    if 'hasOccupation' not in actor_json:
        return False
    if not isinstance(actor_json['hasOccupation'], list):
        return False
    # set the skills on the first Occupation entry found
    for occupation_item in actor_json['hasOccupation']:
        if not isinstance(occupation_item, dict):
            continue
        if occupation_item.get('@type') != 'Occupation':
            continue
        occupation_item['skills'] = skills_list
        return True
    return False
2021-12-26 18:50:59 +00:00
def is_account_dir(dir_name: str) -> bool:
    """Is the given directory an account within /accounts ?
    """
    if '@' not in dir_name:
        return False
    # these are special directories, not ordinary accounts
    for reserved in ('inbox@', 'news@'):
        if reserved in dir_name:
            return False
    return True
2021-06-07 19:18:13 +00:00
2021-12-26 18:40:10 +00:00
def permitted_dir(path: str) -> bool:
    """These are special paths which should not be accessible
    directly via GET or POST
    """
    forbidden_prefixes = ('/wfendpoints', '/keys', '/accounts')
    return not path.startswith(forbidden_prefixes)
2021-06-20 15:45:29 +00:00
2021-12-26 18:37:07 +00:00
def user_agent_domain(user_agent: str, debug: bool) -> str:
    """If the User-Agent string contains a domain
    then return it
    """
    if '+http' not in user_agent:
        return None
    # take whatever follows "+http", then strip url parts down
    # to the bare domain
    agent_domain = user_agent.split('+http')[1].strip()
    if '://' in agent_domain:
        agent_domain = agent_domain.split('://')[1]
    if '/' in agent_domain:
        agent_domain = agent_domain.split('/')[0]
    if ')' in agent_domain:
        agent_domain = agent_domain.split(')')[0].strip()
    for unwanted in (' ', ';'):
        if unwanted in agent_domain:
            agent_domain = agent_domain.replace(unwanted, '')
    # a domain must contain at least one dot
    if '.' not in agent_domain:
        return None
    if debug:
        print('User-Agent Domain: ' + agent_domain)
    return agent_domain
2021-12-26 10:57:03 +00:00
def has_object_dict(post_json_object: {}) -> bool:
    """Returns true if the given post has an object dict
    """
    obj = post_json_object.get('object')
    # an empty dict is treated as no object
    return isinstance(obj, dict) and bool(obj)
2021-06-26 11:16:41 +00:00
2021-12-26 18:32:02 +00:00
def get_alt_path(actor: str, domain_full: str, calling_domain: str) -> str:
    """Returns alternate path from the actor
    eg. https://clearnetdomain/path becomes http://oniondomain/path
    """
    post_actor = actor
    hidden_net_caller = \
        calling_domain.endswith('.onion') or calling_domain.endswith('.i2p')
    if calling_domain not in actor and domain_full in actor:
        if hidden_net_caller:
            post_actor = \
                'http://' + calling_domain + actor.split(domain_full)[1]
            print('Changed POST domain from ' + actor + ' to ' + post_actor)
    return post_actor
2021-06-26 11:16:41 +00:00
2021-12-26 18:22:20 +00:00
def get_actor_property_url(actor_json: {}, property_name: str) -> str:
    """Returns a url property from an actor
    property_name is matched case-insensitively as a prefix
    """
    if not actor_json.get('attachment'):
        return ''
    property_name = property_name.lower()
    for property_value in actor_json['attachment']:
        name_field = property_value.get('name')
        if not name_field:
            continue
        if not name_field.lower().startswith(property_name):
            continue
        if not property_value.get('type'):
            continue
        if not property_value.get('value'):
            continue
        if property_value['type'] != 'PropertyValue':
            continue
        # normalize whitespace in place
        property_value['value'] = property_value['value'].strip()
        url_str = property_value['value']
        # must begin with a recognized protocol prefix
        prefixes = get_protocol_prefixes()
        if not url_str.startswith(tuple(prefixes)):
            continue
        # basic sanity checks for a url
        if '.' not in url_str:
            continue
        if ' ' in url_str:
            continue
        if ',' in url_str:
            continue
        return url_str
    return ''
2021-06-26 14:21:24 +00:00
2021-12-26 18:17:37 +00:00
def remove_domain_port(domain: str) -> str:
    """If the domain has a port appended then remove it
    eg. mydomain.com:80 becomes mydomain.com
    """
    # did: identifiers contain colons but are not host:port pairs
    if ':' not in domain or domain.startswith('did:'):
        return domain
    return domain.split(':')[0]
2021-12-26 18:14:21 +00:00
def get_port_from_domain(domain: str) -> int:
    """If the domain has a port number appended then return it
    eg. mydomain.com:80 returns 80
    """
    # did: identifiers contain colons but are not host:port pairs
    if ':' not in domain or domain.startswith('did:'):
        return None
    port_text = domain.split(':')[1]
    return int(port_text) if port_text.isdigit() else None
2021-07-06 09:44:45 +00:00
2021-12-26 18:10:53 +00:00
def valid_url_prefix(url: str) -> bool:
    """Does the given url have a valid prefix?
    """
    if '/' not in url:
        return False
    recognized = ('https:', 'http:', 'hyper:', 'i2p:', 'gnunet:')
    return url.startswith(recognized)
2021-12-26 18:08:08 +00:00
def remove_line_endings(text: str) -> str:
    """Removes any newline from the end of a string
    """
    for line_ending in ('\n', '\r'):
        text = text.replace(line_ending, '')
    return text.strip()
2021-07-20 20:39:26 +00:00
2021-12-26 18:05:54 +00:00
def valid_password(password: str) -> bool:
    """Returns true if the given password is valid
    ie. at least 8 characters long
    """
    return len(password) >= 8
2021-07-25 13:09:39 +00:00
2021-12-28 12:15:46 +00:00
def is_float(value) -> bool:
    """Is the given value convertible to float?

    Also handles non-numeric, non-string values (eg. None or a list),
    which raise TypeError rather than ValueError and previously
    escaped the handler.
    """
    try:
        float(value)
        return True
    except (ValueError, TypeError):
        return False
2021-07-28 09:35:21 +00:00
2021-12-26 18:01:02 +00:00
def date_string_to_seconds(date_str: str) -> int:
    """Converts a date string (eg "published") into seconds since epoch
    Returns None if the date cannot be parsed
    """
    try:
        parsed_date = \
            datetime.datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%SZ')
    except BaseException:
        print('EX: date_string_to_seconds unable to parse date ' +
              str(date_str))
        return None
    return int(datetime.datetime.timestamp(parsed_date))
2021-07-28 09:35:21 +00:00
2021-12-26 17:55:38 +00:00
def date_seconds_to_string(date_sec: int) -> str:
    """Converts a date in seconds since epoch to a string
    """
    converted = datetime.datetime.fromtimestamp(date_sec)
    return converted.strftime("%Y-%m-%dT%H:%M:%SZ")
2021-07-30 16:06:34 +00:00
2021-12-26 17:53:07 +00:00
def has_group_type(base_dir: str, actor: str, person_cache: {},
                   debug: bool = False) -> bool:
    """Does the given actor url have a group type?
    """
    # does the actor path clearly indicate that this is a group?
    # eg. https://lemmy/c/groupname
    for group_path in get_group_paths():
        if group_path in actor:
            if debug:
                print('grpPath ' + group_path + ' in ' + actor)
            return True
    # is there a cached actor which can be examined for Group type?
    return is_group_actor(base_dir, actor, person_cache, debug)
2021-07-31 11:56:28 +00:00
2021-12-26 17:41:07 +00:00
def is_group_actor(base_dir: str, actor: str, person_cache: {},
                   debug: bool = False) -> bool:
    """Is the given actor a group?

    First consults the in-memory person_cache, then falls back to the
    cached actor file on disk.
    """
    if person_cache:
        if person_cache.get(actor):
            if person_cache[actor].get('actor'):
                if person_cache[actor]['actor'].get('type'):
                    if person_cache[actor]['actor']['type'] == 'Group':
                        if debug:
                            print('Cached actor ' + actor + ' has Group type')
                        return True
                # cached actor exists but is not a group
                return False
    if debug:
        print('Actor ' + actor + ' not in cache')
    cached_actor_filename = \
        base_dir + '/cache/actors/' + (actor.replace('/', '#')) + '.json'
    if not os.path.isfile(cached_actor_filename):
        if debug:
            print('Cached actor file not found ' + cached_actor_filename)
        return False
    # use a context manager so the file handle is not leaked
    with open(cached_actor_filename, 'r') as fp_actor:
        if '"type": "Group"' in fp_actor.read():
            if debug:
                print('Group type found in ' + cached_actor_filename)
            return True
    return False
2021-08-07 17:44:25 +00:00
2021-12-26 17:33:24 +00:00
def is_group_account(base_dir: str, nickname: str, domain: str) -> bool:
    """Returns true if the given account is a group

    Scans the account's actor json file for a Group type field.
    """
    account_filename = acct_dir(base_dir, nickname, domain) + '.json'
    if not os.path.isfile(account_filename):
        return False
    # use a context manager so the file handle is always closed
    # (the previous open().read() leaked the handle)
    with open(account_filename, 'r', encoding='utf-8') as fp_account:
        if '"type": "Group"' in fp_account.read():
            return True
    return False
2021-12-26 17:29:09 +00:00
def get_currencies() -> {}:
    """Returns a dictionary of currencies

    Keys are currency symbols and values are ISO 4217 currency codes.
    """
    # NOTE(review): several symbol keys appear blank here — they are
    # presumably non-ASCII currency symbols (e.g. for EUR, INR, RUB)
    # that were stripped in transit; verify against the repository.
    return {
        "CA$": "CAD",
        "J$": "JMD",
        "£": "GBP",
        "": "EUR",
        "؋": "AFN",
        "ƒ": "AWG",
        "": "AZN",
        "Br": "BYN",
        "BZ$": "BZD",
        "$b": "BOB",
        "KM": "BAM",
        "P": "BWP",
        "лв": "BGN",
        "R$": "BRL",
        "": "KHR",
        "$U": "UYU",
        "RD$": "DOP",
        "$": "USD",
        "": "CRC",
        "kn": "HRK",
        "": "CUP",
        "": "CZK",
        "kr": "NOK",
        "¢": "GHS",
        "Q": "GTQ",
        "L": "HNL",
        "Ft": "HUF",
        "Rp": "IDR",
        "": "INR",
        "": "IRR",
        "": "ILS",
        "¥": "JPY",
        "": "KRW",
        "": "LAK",
        "ден": "MKD",
        "RM": "MYR",
        "": "MUR",
        "": "MNT",
        "MT": "MZN",
        "C$": "NIO",
        "": "NGN",
        "Gs": "PYG",
        "": "PLN",
        "lei": "RON",
        "": "RUB",
        "Дин": "RSD",
        "S": "SOS",
        "R": "ZAR",
        "CHF": "CHF",
        "NT$": "TWD",
        "฿": "THB",
        "TT$": "TTD",
        "": "UAH",
        "Bs": "VEF",
        "": "VND",
        # NOTE(review): "ZQD" is not a standard ISO 4217 code —
        # possibly intended to be ZWL (Zimbabwe dollar); confirm.
        "Z$": "ZQD"
    }
2021-08-08 11:16:18 +00:00
2021-12-26 17:26:55 +00:00
def get_supported_languages(base_dir: str) -> []:
    """Returns a list of supported languages

    These are the two-letter codes of the json files found in the
    translations directory. Returns an empty list if the directory
    does not exist.
    """
    translations_dir = base_dir + '/translations'
    supported = []
    # only the top level of the translations directory is examined
    for _, _, filenames in os.walk(translations_dir):
        supported = [
            fname.split('.')[0] for fname in filenames
            if fname.endswith('.json') and len(fname.split('.')[0]) == 2
        ]
        break
    return supported
2021-12-26 17:18:34 +00:00
def get_category_types(base_dir: str) -> []:
    """Returns the list of ontologies

    Derived from the '*Types.json' files in the ontology directory,
    excluding editor backups and custom ontology files.
    """
    ontology_dir = base_dir + '/ontology'
    categories = []
    # only the top level of the ontology directory is examined
    for _, _, filenames in os.walk(ontology_dir):
        categories = [
            fname.split('.')[0].replace('Types', '')
            for fname in filenames
            if fname.endswith('.json') and
            '#' not in fname and '~' not in fname and
            not fname.startswith('custom') and
            'Types' in fname.split('.')[0]
        ]
        break
    return categories
2021-12-26 17:24:00 +00:00
def get_shares_files_list() -> []:
    """Returns the possible shares files
    """
    # items offered, and items wanted
    return 'shares', 'wanted'
2021-12-26 17:21:37 +00:00
def replace_users_with_at(actor: str) -> str:
    """ https://domain/users/nick becomes https://domain/@nick
    """
    # only the first matching users path is substituted
    for users_path in get_user_paths():
        if users_path in actor:
            return actor.replace(users_path, '/@')
    return actor
2021-12-26 17:15:04 +00:00
def has_actor(post_json_object: {}, debug: bool) -> bool:
    """Does the given post have an actor?

    Actor urls containing a fragment are considered invalid.
    """
    actor_id = post_json_object.get('actor')
    if actor_id:
        return '#' not in actor_id
    if debug:
        post_type = post_json_object.get('type')
        if post_type:
            msg = post_type + ' has missing actor'
            if post_json_object.get('id'):
                msg += ' ' + post_json_object['id']
            print(msg)
    return False
2021-10-13 10:11:02 +00:00
2021-12-26 17:12:07 +00:00
def has_object_stringType(post_json_object: {}, debug: bool) -> bool:
    """Does the given post have a type field within an object dict?
    """
    if not has_object_dict(post_json_object):
        if debug:
            print('has_object_stringType no object found')
        return False
    if post_json_object['object'].get('type'):
        if isinstance(post_json_object['object']['type'], str):
            return True
        if debug:
            if post_json_object.get('type'):
                print('DEBUG: ' + post_json_object['type'] +
                      ' type within object is not a string')
    if debug:
        # use .get() so that posts with no id field do not
        # raise a KeyError here
        print('No type field within object ' +
              str(post_json_object.get('id')))
    return False
2021-10-13 10:37:52 +00:00
2021-12-26 15:54:46 +00:00
def has_object_string_object(post_json_object: {}, debug: bool) -> bool:
    """Does the given post have an object string field within an object dict?
    """
    if not has_object_dict(post_json_object):
        if debug:
            # previously this printed the wrong function name
            # (copy-paste from has_object_stringType)
            print('has_object_string_object no object found')
        return False
    if post_json_object['object'].get('object'):
        if isinstance(post_json_object['object']['object'], str):
            return True
        elif debug:
            if post_json_object.get('type'):
                print('DEBUG: ' + post_json_object['type'] +
                      ' object within dict is not a string')
    if debug:
        # use .get() so that posts with no id field do not
        # raise a KeyError here
        print('No object field within dict ' +
              str(post_json_object.get('id')))
    return False
2021-10-13 11:15:06 +00:00
2021-12-26 17:12:07 +00:00
def has_object_string(post_json_object: {}, debug: bool) -> bool:
    """Does the given post have an object string field?
    """
    if post_json_object.get('object'):
        if isinstance(post_json_object['object'], str):
            return True
        if debug:
            if post_json_object.get('type'):
                print('DEBUG: ' + post_json_object['type'] +
                      ' object is not a string')
    if debug:
        # use .get() so that posts with no id field do not
        # raise a KeyError here
        print('No object field within post ' +
              str(post_json_object.get('id')))
    return False
2021-11-03 11:25:26 +00:00
2021-12-26 16:59:38 +00:00
def get_new_post_endpoints() -> []:
    """Returns a list of endpoints for new posts
    """
    new_post_endpoints = (
        'newpost', 'newblog', 'newunlisted', 'newfollowers', 'newdm',
        'newreminder', 'newreport', 'newquestion', 'newshare', 'newwanted',
        'editblogpost'
    )
    return new_post_endpoints
2021-12-17 12:01:54 +00:00
2021-12-26 16:59:38 +00:00
def get_fav_filename_from_url(base_dir: str, favicon_url: str) -> str:
    """Returns the cached filename for a favicon based upon its url
    """
    url = favicon_url
    # drop the scheme prefix, if present
    if '://' in url:
        url = url.split('://')[1]
    # collapse a standard /favicon.* path onto the domain name
    if '/favicon.' in url:
        url = url.replace('/favicon.', '.')
    return base_dir + '/favicons/' + url.replace('/', '-')