Merge branch 'main' of gitlab.com:bashrc2/epicyon

merge-requests/30/head
Bob Mottram 2022-06-10 20:58:02 +01:00
commit ae62053310
31 changed files with 274 additions and 278 deletions

View File

@ -8,6 +8,7 @@ __status__ = "Production"
__module_group__ = "ActivityPub"
import os
from utils import text_in_file
from utils import has_object_string_object
from utils import has_users_path
from utils import get_full_domain
@ -164,8 +165,8 @@ def _accept_follow(base_dir: str, message_json: {},
unfollowed_filename = \
acct_dir(base_dir, nickname, accepted_domain_full) + '/unfollowed.txt'
if os.path.isfile(unfollowed_filename):
if followed_nickname + '@' + followed_domain_full in \
open(unfollowed_filename, encoding='utf-8').read():
if text_in_file(followed_nickname + '@' + followed_domain_full,
unfollowed_filename):
if debug:
print('DEBUG: follow accept arrived for ' +
nickname + '@' + accepted_domain_full +

View File

@ -15,6 +15,7 @@ import secrets
import datetime
from utils import is_system_account
from utils import has_users_path
from utils import text_in_file
def _hash_password(password: str) -> str:
@ -177,7 +178,7 @@ def store_basic_credentials(base_dir: str,
password_file = base_dir + '/accounts/passwords'
store_str = nickname + ':' + _hash_password(password)
if os.path.isfile(password_file):
if nickname + ':' in open(password_file, encoding='utf-8').read():
if text_in_file(nickname + ':', password_file):
try:
with open(password_file, 'r', encoding='utf-8') as fin:
with open(password_file + '.new', 'w+',

View File

@ -33,6 +33,7 @@ from utils import get_nickname_from_actor
from utils import acct_dir
from utils import local_actor_url
from utils import has_actor
from utils import text_in_file
from conversation import mute_conversation
from conversation import unmute_conversation
@ -46,8 +47,7 @@ def add_global_block(base_dir: str,
# is the handle already blocked?
block_handle = block_nickname + '@' + block_domain
if os.path.isfile(blocking_filename):
if block_handle in open(blocking_filename,
encoding='utf-8').read():
if text_in_file(block_handle, blocking_filename):
return False
# block an account handle or domain
try:
@ -60,8 +60,7 @@ def add_global_block(base_dir: str,
block_hashtag = block_nickname
# is the hashtag already blocked?
if os.path.isfile(blocking_filename):
if block_hashtag + '\n' in \
open(blocking_filename, encoding='utf-8').read():
if text_in_file(block_hashtag + '\n', blocking_filename):
return False
# block a hashtag
try:
@ -85,16 +84,14 @@ def add_block(base_dir: str, nickname: str, domain: str,
blocking_filename = acct_dir(base_dir, nickname, domain) + '/blocking.txt'
block_handle = block_nickname + '@' + block_domain
if os.path.isfile(blocking_filename):
if block_handle + '\n' in open(blocking_filename,
encoding='utf-8').read():
if text_in_file(block_handle + '\n', blocking_filename):
return False
# if we are following then unfollow
following_filename = \
acct_dir(base_dir, nickname, domain) + '/following.txt'
if os.path.isfile(following_filename):
if block_handle + '\n' in open(following_filename,
encoding='utf-8').read():
if text_in_file(block_handle + '\n', following_filename):
following_str = ''
try:
with open(following_filename, 'r',
@ -119,8 +116,7 @@ def add_block(base_dir: str, nickname: str, domain: str,
followers_filename = \
acct_dir(base_dir, nickname, domain) + '/followers.txt'
if os.path.isfile(followers_filename):
if block_handle + '\n' in open(followers_filename,
encoding='utf-8').read():
if text_in_file(block_handle + '\n', followers_filename):
followers_str = ''
try:
with open(followers_filename, 'r',
@ -159,8 +155,7 @@ def remove_global_block(base_dir: str,
if not unblock_nickname.startswith('#'):
unblock_handle = unblock_nickname + '@' + unblock_domain
if os.path.isfile(unblocking_filename):
if unblock_handle in open(unblocking_filename,
encoding='utf-8').read():
if text_in_file(unblock_handle, unblocking_filename):
try:
with open(unblocking_filename, 'r',
encoding='utf-8') as fp_unblock:
@ -187,8 +182,7 @@ def remove_global_block(base_dir: str,
else:
unblock_hashtag = unblock_nickname
if os.path.isfile(unblocking_filename):
if unblock_hashtag + '\n' in open(unblocking_filename,
encoding='utf-8').read():
if text_in_file(unblock_hashtag + '\n', unblocking_filename):
try:
with open(unblocking_filename, 'r',
encoding='utf-8') as fp_unblock:
@ -224,8 +218,7 @@ def remove_block(base_dir: str, nickname: str, domain: str,
acct_dir(base_dir, nickname, domain) + '/blocking.txt'
unblock_handle = unblock_nickname + '@' + unblock_domain
if os.path.isfile(unblocking_filename):
if unblock_handle in open(unblocking_filename,
encoding='utf-8').read():
if text_in_file(unblock_handle, unblocking_filename):
try:
with open(unblocking_filename, 'r',
encoding='utf-8') as fp_unblock:
@ -262,8 +255,7 @@ def is_blocked_hashtag(base_dir: str, hashtag: str) -> bool:
hashtag = hashtag.strip('\n').strip('\r')
if not hashtag.startswith('#'):
hashtag = '#' + hashtag
if hashtag + '\n' in open(global_blocking_filename,
encoding='utf-8').read():
if text_in_file(hashtag + '\n', global_blocking_filename):
return True
return False
@ -373,11 +365,10 @@ def is_blocked_domain(base_dir: str, domain: str,
allow_filename = base_dir + '/accounts/allowedinstances.txt'
# instance allow list
if not short_domain:
if domain not in open(allow_filename, encoding='utf-8').read():
if not text_in_file(domain, allow_filename):
return True
else:
if short_domain not in open(allow_filename,
encoding='utf-8').read():
if not text_in_file(short_domain, allow_filename):
return True
return False
@ -407,44 +398,37 @@ def is_blocked(base_dir: str, nickname: str, domain: str,
else:
global_blocks_filename = base_dir + '/accounts/blocking.txt'
if os.path.isfile(global_blocks_filename):
if '*@' + block_domain in open(global_blocks_filename,
encoding='utf-8').read():
if text_in_file('*@' + block_domain, global_blocks_filename):
return True
if block_handle:
block_str = block_handle + '\n'
if block_str in open(global_blocks_filename,
encoding='utf-8').read():
if text_in_file(block_str, global_blocks_filename):
return True
else:
# instance allow list
allow_filename = base_dir + '/accounts/allowedinstances.txt'
short_domain = _get_short_domain(block_domain)
if not short_domain:
if block_domain + '\n' not in open(allow_filename,
encoding='utf-8').read():
if not text_in_file(block_domain + '\n', allow_filename):
return True
else:
if short_domain + '\n' not in open(allow_filename,
encoding='utf-8').read():
if not text_in_file(short_domain + '\n', allow_filename):
return True
# account level allow list
account_dir = acct_dir(base_dir, nickname, domain)
allow_filename = account_dir + '/allowedinstances.txt'
if os.path.isfile(allow_filename):
if block_domain + '\n' not in open(allow_filename,
encoding='utf-8').read():
if not text_in_file(block_domain + '\n', allow_filename):
return True
# account level block list
blocking_filename = account_dir + '/blocking.txt'
if os.path.isfile(blocking_filename):
if '*@' + block_domain + '\n' in open(blocking_filename,
encoding='utf-8').read():
if text_in_file('*@' + block_domain + '\n', blocking_filename):
return True
if block_handle:
if block_handle + '\n' in open(blocking_filename,
encoding='utf-8').read():
if text_in_file(block_handle + '\n', blocking_filename):
return True
return False

View File

@ -17,6 +17,7 @@ from webapp_utils import html_footer
from webapp_utils import get_post_attachments_as_html
from webapp_utils import edit_text_area
from webapp_media import add_embedded_elements
from utils import text_in_file
from utils import local_actor_url
from utils import get_actor_languages_list
from utils import get_base_content_from_post
@ -929,8 +930,7 @@ def path_contains_blog_link(base_dir: str,
acct_dir(base_dir, nickname, domain) + '/tlblogs.index'
if not os.path.isfile(blog_index_filename):
return None, None
if '#' + user_ending2[1] + '.' not in open(blog_index_filename,
encoding='utf-8').read():
if not text_in_file('#' + user_ending2[1] + '.', blog_index_filename):
return None, None
message_id = local_actor_url(http_prefix, nickname, domain_full) + \
'/statuses/' + user_ending2[1]

View File

@ -28,6 +28,7 @@ from utils import acct_dir
from utils import local_actor_url
from utils import has_actor
from utils import has_object_string_type
from utils import text_in_file
from posts import get_person_box
from session import post_json
@ -71,8 +72,7 @@ def undo_bookmarks_collection_entry(recent_posts_cache: {},
else:
bookmark_index = post_filename.strip()
bookmark_index = bookmark_index.replace('\n', '').replace('\r', '')
if bookmark_index not in open(bookmarks_index_filename,
encoding='utf-8').read():
if not text_in_file(bookmark_index, bookmarks_index_filename):
return
index_str = ''
try:
@ -238,8 +238,7 @@ def update_bookmarks_collection(recent_posts_cache: {},
acct_dir(base_dir, nickname, domain) + '/bookmarks.index'
bookmark_index = post_filename.split('/')[-1]
if os.path.isfile(bookmarks_index_filename):
if bookmark_index not in open(bookmarks_index_filename,
encoding='utf-8').read():
if not text_in_file(bookmark_index, bookmarks_index_filename):
try:
with open(bookmarks_index_filename, 'r+',
encoding='utf-8') as bmi_file:

View File

@ -1683,7 +1683,7 @@ def create_edits_html(edits_json: {}, post_json_object: {},
content = prev_content
if not edits_str:
return ''
return '<details><summary class="cw">' + \
return '<details><summary class="cw" tabindex="10">' + \
translate['SHOW EDITS'] + '</summary>' + \
edits_str + '</details>'

View File

@ -11,6 +11,7 @@ import os
from utils import has_object_dict
from utils import acct_dir
from utils import remove_id_ending
from utils import text_in_file
def _get_conversation_filename(base_dir: str, nickname: str, domain: str,
@ -50,8 +51,7 @@ def update_conversation(base_dir: str, nickname: str, domain: str,
except OSError:
print('EX: update_conversation ' +
'unable to write to ' + conversation_filename)
elif post_id + '\n' not in open(conversation_filename,
encoding='utf-8').read():
elif not text_in_file(post_id + '\n', conversation_filename):
try:
with open(conversation_filename, 'a+',
encoding='utf-8') as conv_file:

View File

@ -251,6 +251,7 @@ from languages import set_actor_languages
from languages import get_understood_languages
from like import update_likes_collection
from reaction import update_reaction_collection
from utils import text_in_file
from utils import is_onion_request
from utils import is_i2p_request
from utils import get_account_timezone
@ -510,7 +511,7 @@ class PubServer(BaseHTTPRequestHandler):
if os.path.isfile(votes_filename):
# have we already voted on this?
if message_id in open(votes_filename, encoding='utf-8').read():
if text_in_file(message_id, votes_filename):
print('Already voted on message ' + message_id)
return
@ -2921,7 +2922,8 @@ class PubServer(BaseHTTPRequestHandler):
nw_filename = newswire_blocked_filename
nw_written = False
try:
with open(nw_filename, 'w+') as nofile:
with open(nw_filename, 'w+',
encoding='utf-8') as nofile:
nofile.write('\n')
nw_written = True
except OSError as ex:
@ -5671,7 +5673,8 @@ class PubServer(BaseHTTPRequestHandler):
city_filename = \
acct_dir(base_dir, nickname, domain) + '/city.txt'
try:
with open(city_filename, 'w+') as fp_city:
with open(city_filename, 'w+',
encoding='utf-8') as fp_city:
fp_city.write(fields['cityDropdown'])
except OSError:
print('EX: unable to write city ' + city_filename)
@ -6402,7 +6405,8 @@ class PubServer(BaseHTTPRequestHandler):
# if the list was given as comma separated
eds = fields['editors'].split(',')
try:
with open(editors_file, 'w+') as edfil:
with open(editors_file, 'w+',
encoding='utf-8') as edfil:
for ed_nick in eds:
ed_nick = ed_nick.strip()
ed_dir = base_dir + \
@ -6427,8 +6431,8 @@ class PubServer(BaseHTTPRequestHandler):
# nicknames on separate lines
eds = fields['editors'].split('\n')
try:
with open(editors_file,
'w+') as edfile:
with open(editors_file, 'w+',
encoding='utf-8') as edfile:
for ed_nick in eds:
ed_nick = ed_nick.strip()
ed_dir = \
@ -8105,7 +8109,8 @@ class PubServer(BaseHTTPRequestHandler):
if os.path.isfile(ontology_filename):
ontology_file = None
try:
with open(ontology_filename, 'r') as fp_ont:
with open(ontology_filename, 'r',
encoding='utf-8') as fp_ont:
ontology_file = fp_ont.read()
except OSError:
print('EX: unable to read ontology ' + ontology_filename)
@ -11508,7 +11513,7 @@ class PubServer(BaseHTTPRequestHandler):
return True
ssml_str = None
try:
with open(ssml_filename, 'r') as fp_ssml:
with open(ssml_filename, 'r', encoding='utf-8') as fp_ssml:
ssml_str = fp_ssml.read()
except OSError:
pass
@ -18638,7 +18643,8 @@ class PubServer(BaseHTTPRequestHandler):
media_tag_filename = media_filename + '.etag'
if os.path.isfile(media_tag_filename):
try:
with open(media_tag_filename, 'r') as efile:
with open(media_tag_filename, 'r',
encoding='utf-8') as efile:
etag = efile.read()
except OSError:
print('EX: do_HEAD unable to read ' +
@ -18654,7 +18660,8 @@ class PubServer(BaseHTTPRequestHandler):
if media_binary:
etag = md5(media_binary).hexdigest() # nosec
try:
with open(media_tag_filename, 'w+') as efile:
with open(media_tag_filename, 'w+',
encoding='utf-8') as efile:
efile.write(etag)
except OSError:
print('EX: do_HEAD unable to write ' +
@ -20744,25 +20751,25 @@ class PubServerUnitTest(PubServer):
class EpicyonServer(ThreadingHTTPServer):
def handle_error(self, request, client_address):
# suppress connection reset errors
cls, e = sys.exc_info()[:2]
cls, e_ret = sys.exc_info()[:2]
if cls is ConnectionResetError:
if e.errno != errno.ECONNRESET:
print('ERROR: (EpicyonServer) ' + str(cls) + ", " + str(e))
if e_ret.errno != errno.ECONNRESET:
print('ERROR: (EpicyonServer) ' + str(cls) + ", " + str(e_ret))
pass
elif cls is BrokenPipeError:
pass
else:
print('ERROR: (EpicyonServer) ' + str(cls) + ", " + str(e))
print('ERROR: (EpicyonServer) ' + str(cls) + ", " + str(e_ret))
return HTTPServer.handle_error(self, request, client_address)
def run_posts_queue(base_dir: str, send_threads: [], debug: bool,
timeoutMins: int) -> None:
timeout_mins: int) -> None:
"""Manages the threads used to send posts
"""
while True:
time.sleep(1)
remove_dormant_threads(base_dir, send_threads, debug, timeoutMins)
remove_dormant_threads(base_dir, send_threads, debug, timeout_mins)
def run_shares_expire(version_number: str, base_dir: str) -> None:
@ -20819,7 +20826,8 @@ def load_tokens(base_dir: str, tokens_dict: {}, tokens_lookup: {}) -> None:
nickname = handle.split('@')[0]
token = None
try:
with open(token_filename, 'r') as fp_tok:
with open(token_filename, 'r',
encoding='utf-8') as fp_tok:
token = fp_tok.read()
except BaseException as ex:
print('WARN: Unable to read token for ' +
@ -20862,7 +20870,7 @@ def run_daemon(preferred_podcast_formats: [],
max_news_posts: int,
max_mirrored_articles: int,
max_newswire_feed_size_kb: int,
max_newswire_postsPerSource: int,
max_newswire_posts_per_source: int,
show_published_date_only: bool,
voting_time_mins: int,
positive_voting: bool,
@ -21104,7 +21112,7 @@ def run_daemon(preferred_podcast_formats: [],
# this is the maximum number of posts to show for each.
# This avoids one or two sources from dominating the news,
# and also prevents big feeds from slowing down page load times
httpd.max_newswire_postsPerSource = max_newswire_postsPerSource
httpd.max_newswire_posts_per_source = max_newswire_posts_per_source
# Show only the date at the bottom of posts, and not the time
httpd.show_published_date_only = show_published_date_only

View File

@ -16,6 +16,7 @@ import webbrowser
import urllib.parse
from pathlib import Path
from random import randint
from utils import text_in_file
from utils import disallow_announce
from utils import disallow_reply
from utils import get_base_content_from_post
@ -169,8 +170,7 @@ def _mark_post_as_read(actor: str, post_id: str, post_category: str) -> None:
read_posts_dir = home_dir + '/.config/epicyon/' + handle
read_posts_filename = read_posts_dir + '/' + post_category + '.txt'
if os.path.isfile(read_posts_filename):
if post_id in open(read_posts_filename,
encoding='utf-8').read():
if text_in_file(post_id, read_posts_filename):
return
try:
# prepend to read posts file
@ -201,7 +201,7 @@ def _has_read_post(actor: str, post_id: str, post_category: str) -> bool:
read_posts_dir = home_dir + '/.config/epicyon/' + handle
read_posts_filename = read_posts_dir + '/' + post_category + '.txt'
if os.path.isfile(read_posts_filename):
if post_id in open(read_posts_filename).read():
if text_in_file(post_id, read_posts_filename):
return True
return False

View File

@ -68,6 +68,7 @@ from tests import test_update_actor
from tests import run_all_tests
from auth import store_basic_credentials
from auth import create_password
from utils import text_in_file
from utils import remove_domain_port
from utils import get_port_from_domain
from utils import has_users_path
@ -2748,7 +2749,7 @@ def _command_options() -> None:
sys.exit()
password_file = base_dir + '/accounts/passwords'
if os.path.isfile(password_file):
if nickname + ':' in open(password_file).read():
if text_in_file(nickname + ':', password_file):
store_basic_credentials(base_dir, nickname, new_password)
print('Password for ' + nickname + ' was changed')
else:

View File

@ -9,6 +9,7 @@ __module_group__ = "Moderation"
import os
from utils import acct_dir
from utils import text_in_file
def add_filter(base_dir: str, nickname: str, domain: str, words: str) -> bool:
@ -16,7 +17,7 @@ def add_filter(base_dir: str, nickname: str, domain: str, words: str) -> bool:
"""
filters_filename = acct_dir(base_dir, nickname, domain) + '/filters.txt'
if os.path.isfile(filters_filename):
if words in open(filters_filename, encoding='utf-8').read():
if text_in_file(words, filters_filename):
return False
try:
with open(filters_filename, 'a+',
@ -37,7 +38,7 @@ def add_global_filter(base_dir: str, words: str) -> bool:
return False
filters_filename = base_dir + '/accounts/filters.txt'
if os.path.isfile(filters_filename):
if words in open(filters_filename, encoding='utf-8').read():
if text_in_file(words, filters_filename):
return False
try:
with open(filters_filename, 'a+', encoding='utf-8') as filters_file:
@ -54,7 +55,7 @@ def remove_filter(base_dir: str, nickname: str, domain: str,
filters_filename = acct_dir(base_dir, nickname, domain) + '/filters.txt'
if not os.path.isfile(filters_filename):
return False
if words not in open(filters_filename, encoding='utf-8').read():
if not text_in_file(words, filters_filename):
return False
new_filters_filename = filters_filename + '.new'
try:
@ -79,7 +80,7 @@ def remove_global_filter(base_dir: str, words: str) -> bool:
filters_filename = base_dir + '/accounts/filters.txt'
if not os.path.isfile(filters_filename):
return False
if words not in open(filters_filename, encoding='utf-8').read():
if not text_in_file(words, filters_filename):
return False
new_filters_filename = filters_filename + '.new'
try:

View File

@ -30,6 +30,7 @@ from utils import get_user_paths
from utils import acct_dir
from utils import has_group_type
from utils import local_actor_url
from utils import text_in_file
from acceptreject import create_accept
from acceptreject import create_reject
from webfinger import webfinger_handle
@ -94,8 +95,7 @@ def _pre_approved_follower(base_dir: str,
account_dir = base_dir + '/accounts/' + handle
approved_filename = account_dir + '/approved.txt'
if os.path.isfile(approved_filename):
if approve_handle in open(approved_filename,
encoding='utf-8').read():
if text_in_file(approve_handle, approved_filename):
return True
return False
@ -115,8 +115,7 @@ def _remove_from_follow_base(base_dir: str,
' to remove ' + handle + ' from')
return
accept_deny_actor = None
if accept_or_deny_handle not in open(approve_follows_filename,
encoding='utf-8').read():
if not text_in_file(accept_or_deny_handle, approve_follows_filename):
# is this stored in the file as an actor rather than a handle?
accept_deny_nickname = accept_or_deny_handle.split('@')[0]
accept_deny_domain = accept_or_deny_handle.split('@')[1]
@ -127,8 +126,7 @@ def _remove_from_follow_base(base_dir: str,
for users_name in users_paths:
accept_deny_actor = \
'://' + accept_deny_domain + users_name + accept_deny_nickname
if accept_deny_actor in open(approve_follows_filename,
encoding='utf-8').read():
if text_in_file(accept_deny_actor, approve_follows_filename):
actor_found = True
break
if not actor_found:
@ -186,7 +184,7 @@ def is_following_actor(base_dir: str,
return False
if actor.startswith('@'):
actor = actor[1:]
if actor.lower() in open(following_file, encoding='utf-8').read().lower():
if text_in_file(actor, following_file, False):
return True
following_nickname = get_nickname_from_actor(actor)
if not following_nickname:
@ -196,8 +194,7 @@ def is_following_actor(base_dir: str,
following_handle = \
get_full_domain(following_nickname + '@' + following_domain,
following_port)
if following_handle.lower() in open(following_file,
encoding='utf-8').read().lower():
if text_in_file(following_handle, following_file, False):
return True
return False
@ -317,8 +314,7 @@ def unfollow_account(base_dir: str, nickname: str, domain: str,
print('DEBUG: follow file ' + filename + ' was not found')
return False
handle_to_unfollow_lower = handle_to_unfollow.lower()
if handle_to_unfollow_lower not in open(filename,
encoding='utf-8').read().lower():
if not text_in_file(handle_to_unfollow_lower, filename, False):
if debug:
print('DEBUG: handle to unfollow ' + handle_to_unfollow +
' is not in ' + filename)
@ -344,8 +340,8 @@ def unfollow_account(base_dir: str, nickname: str, domain: str,
# later arrives then it can be ignored
unfollowed_filename = base_dir + '/accounts/' + handle + '/unfollowed.txt'
if os.path.isfile(unfollowed_filename):
if handle_to_unfollow_lower not in \
open(unfollowed_filename, encoding='utf-8').read().lower():
if not text_in_file(handle_to_unfollow_lower,
unfollowed_filename, False):
try:
with open(unfollowed_filename, 'a+',
encoding='utf-8') as fp_unfoll:
@ -688,8 +684,7 @@ def store_follow_request(base_dir: str,
# should this follow be denied?
deny_follows_filename = accounts_dir + '/followrejects.txt'
if os.path.isfile(deny_follows_filename):
if approve_handle in open(deny_follows_filename,
encoding='utf-8').read():
if text_in_file(approve_handle, deny_follows_filename):
remove_from_follow_requests(base_dir, nickname_to_follow,
domain_to_follow, approve_handle,
debug)
@ -708,8 +703,7 @@ def store_follow_request(base_dir: str,
approve_handle = '!' + approve_handle
if os.path.isfile(approve_follows_filename):
if approve_handle not in open(approve_follows_filename,
encoding='utf-8').read():
if not text_in_file(approve_handle, approve_follows_filename):
try:
with open(approve_follows_filename, 'a+',
encoding='utf-8') as fp_approve:
@ -924,8 +918,7 @@ def send_follow_request(session, base_dir: str,
unfollowed_filename = \
acct_dir(base_dir, nickname, domain) + '/unfollowed.txt'
if os.path.isfile(unfollowed_filename):
if follow_handle in open(unfollowed_filename,
encoding='utf-8').read():
if text_in_file(follow_handle, unfollowed_filename):
unfollowed_file = None
try:
with open(unfollowed_filename, 'r',
@ -1404,7 +1397,7 @@ def get_followers_of_actor(base_dir: str, actor: str, debug: bool) -> {}:
if debug:
print('DEBUG: checking if ' + actor_handle +
' in ' + following_filename)
if actor_handle in open(following_filename).read():
if text_in_file(actor_handle, following_filename):
if debug:
print('DEBUG: ' + account +
' follows ' + actor_handle)

3
git.py
View File

@ -11,6 +11,7 @@ import os
import html
from utils import acct_dir
from utils import has_object_string_type
from utils import text_in_file
def _git_format_content(content: str) -> str:
@ -38,7 +39,7 @@ def _get_git_project_name(base_dir: str, nickname: str, domain: str,
return None
subject_line_words = subject.lower().split(' ')
for word in subject_line_words:
if word in open(git_projects_filename, encoding='utf-8').read():
if text_in_file(word, git_projects_filename):
return word
return None

View File

@ -24,6 +24,7 @@ from utils import get_display_name
from utils import delete_post
from utils import get_status_number
from utils import get_full_domain
from utils import text_in_file
from filters import is_filtered
from context import get_individual_post_context
from session import get_method
@ -70,8 +71,7 @@ def _remove_event_from_timeline(event_id: str,
tl_events_filename: str) -> None:
"""Removes the given event Id from the timeline
"""
if event_id + '\n' not in open(tl_events_filename,
encoding='utf-8').read():
if not text_in_file(event_id + '\n', tl_events_filename):
return
with open(tl_events_filename, 'r',
encoding='utf-8') as fp_tl:
@ -166,8 +166,7 @@ def save_event_post(base_dir: str, handle: str, post_id: str,
# Does this event post already exist within the calendar month?
if os.path.isfile(calendar_filename):
if post_id in open(calendar_filename,
encoding='utf-8').read():
if text_in_file(post_id, calendar_filename):
# Event post already exists
return False
@ -616,7 +615,7 @@ def get_this_weeks_events(base_dir: str, nickname: str, domain: str) -> {}:
calendar_post_ids = []
recreate_events_file = False
with open(calendar_filename, 'r') as events_file:
with open(calendar_filename, 'r', encoding='utf-8') as events_file:
for post_id in events_file:
post_id = post_id.replace('\n', '').replace('\r', '')
post_filename = locate_post(base_dir, nickname, domain, post_id)
@ -760,7 +759,7 @@ def remove_calendar_event(base_dir: str, nickname: str, domain: str,
return
if '/' in message_id:
message_id = message_id.replace('/', '#')
if message_id not in open(calendar_filename, encoding='utf-8').read():
if not text_in_file(message_id, calendar_filename):
return
lines = None
with open(calendar_filename, 'r', encoding='utf-8') as fp_cal:

View File

@ -18,6 +18,7 @@ from languages import understood_post_language
from like import update_likes_collection
from reaction import update_reaction_collection
from reaction import valid_emoji_content
from utils import text_in_file
from utils import get_media_descriptions_from_post
from utils import get_summary_from_post
from utils import delete_cached_html
@ -448,7 +449,7 @@ def valid_inbox(base_dir: str, nickname: str, domain: str) -> bool:
if not os.path.isfile(filename):
print('filename: ' + filename)
return False
if 'postNickname' in open(filename, encoding='utf-8').read():
if text_in_file('postNickname', filename):
print('queue file incorrectly saved to ' + filename)
return False
break
@ -2557,8 +2558,7 @@ def populate_replies(base_dir: str, http_prefix: str, domain: str,
encoding='utf-8'))
if num_lines > max_replies:
return False
if message_id not in open(post_replies_filename,
encoding='utf-8').read():
if not text_in_file(message_id, post_replies_filename):
try:
with open(post_replies_filename, 'a+',
encoding='utf-8') as replies_file:
@ -2875,7 +2875,7 @@ def _like_notify(base_dir: str, domain: str, onion_domain: str,
like_file = account_dir + '/.newLike'
if os.path.isfile(like_file):
if '##sent##' not in open(like_file).read():
if not text_in_file('##sent##', like_file):
return
liker_nickname = get_nickname_from_actor(actor)
@ -2937,7 +2937,7 @@ def _reaction_notify(base_dir: str, domain: str, onion_domain: str,
reaction_file = account_dir + '/.newReaction'
if os.path.isfile(reaction_file):
if '##sent##' not in open(reaction_file, encoding='utf-8').read():
if not text_in_file('##sent##', reaction_file):
return
reaction_nickname = get_nickname_from_actor(actor)
@ -4170,7 +4170,8 @@ def _inbox_after_initial(server, inbox_start_time,
print('MUTE REPLY: ' + destination_filename)
destination_filename_muted = destination_filename + '.muted'
try:
with open(destination_filename_muted, 'w+') as mute_file:
with open(destination_filename_muted, 'w+',
encoding='utf-8') as mute_file:
mute_file.write('\n')
except OSError:
print('EX: unable to write ' + destination_filename_muted)
@ -4569,8 +4570,7 @@ def _check_json_signature(base_dir: str, queue_json: {}) -> (bool, bool):
already_unknown = False
if os.path.isfile(unknown_contexts_file):
if unknown_context in \
open(unknown_contexts_file, encoding='utf-8').read():
if text_in_file(unknown_context, unknown_contexts_file):
already_unknown = True
if not already_unknown:
@ -4588,13 +4588,13 @@ def _check_json_signature(base_dir: str, queue_json: {}) -> (bool, bool):
already_unknown = False
if os.path.isfile(unknown_signatures_file):
if jwebsig_type in \
open(unknown_signatures_file, encoding='utf-8').read():
if text_in_file(jwebsig_type, unknown_signatures_file):
already_unknown = True
if not already_unknown:
try:
with open(unknown_signatures_file, 'a+') as unknown_file:
with open(unknown_signatures_file, 'a+',
encoding='utf-8') as unknown_file:
unknown_file.write(jwebsig_type + '\n')
except OSError:
print('EX: unable to append ' + unknown_signatures_file)
@ -4816,7 +4816,7 @@ def _receive_follow_request(session, session_onion, session_i2p,
print('Updating followers file: ' +
followers_filename + ' adding ' + approve_handle)
if os.path.isfile(followers_filename):
if approve_handle not in open(followers_filename).read():
if not text_in_file(approve_handle, followers_filename):
group_account = \
has_group_type(base_dir,
message_json['actor'], person_cache)

View File

@ -16,6 +16,7 @@ from utils import remove_domain_port
from utils import get_port_from_domain
from utils import get_user_paths
from utils import acct_dir
from utils import text_in_file
from threads import thread_with_trace
from session import create_session
@ -38,8 +39,7 @@ def manual_deny_follow_request(session, session_onion, session_i2p,
# has this handle already been rejected?
rejected_follows_filename = accounts_dir + '/followrejects.txt'
if os.path.isfile(rejected_follows_filename):
if deny_handle in open(rejected_follows_filename,
encoding='utf-8').read():
if text_in_file(deny_handle, rejected_follows_filename):
remove_from_follow_requests(base_dir, nickname, domain,
deny_handle, debug)
print(deny_handle +
@ -115,15 +115,17 @@ def _approve_follower_handle(account_dir: str, approve_handle: str) -> None:
"""
approved_filename = account_dir + '/approved.txt'
if os.path.isfile(approved_filename):
if approve_handle not in open(approved_filename).read():
if not text_in_file(approve_handle, approved_filename):
try:
with open(approved_filename, 'a+') as approved_file:
with open(approved_filename, 'a+',
encoding='utf-8') as approved_file:
approved_file.write(approve_handle + '\n')
except OSError:
print('EX: unable to append ' + approved_filename)
else:
try:
with open(approved_filename, 'w+') as approved_file:
with open(approved_filename, 'w+',
encoding='utf-8') as approved_file:
approved_file.write(approve_handle + '\n')
except OSError:
print('EX: unable to write ' + approved_filename)
@ -280,8 +282,7 @@ def manual_approve_follow_request(session, session_onion, session_i2p,
# update the followers
print('Manual follow accept: updating ' + followers_filename)
if os.path.isfile(followers_filename):
if approve_handle_full not in open(followers_filename,
encoding='utf-8').read():
if not text_in_file(approve_handle_full, followers_filename):
try:
with open(followers_filename, 'r+',
encoding='utf-8') as followers_file:
@ -308,8 +309,7 @@ def manual_approve_follow_request(session, session_onion, session_i2p,
# only update the follow requests file if the follow is confirmed to be
# in followers.txt
if approve_handle_full in open(followers_filename,
encoding='utf-8').read():
if text_in_file(approve_handle_full, followers_filename):
# mark this handle as approved for following
_approve_follower_handle(account_dir, approve_handle)
# update the follow requests with the handles not yet approved

View File

@ -318,12 +318,13 @@ def _spoof_meta_data(base_dir: str, nickname: str, domain: str,
decoy_seed_filename = acct_dir(base_dir, nickname, domain) + '/decoyseed'
decoy_seed = 63725
if os.path.isfile(decoy_seed_filename):
with open(decoy_seed_filename, 'r') as fp_seed:
with open(decoy_seed_filename, 'r', encoding='utf-8') as fp_seed:
decoy_seed = int(fp_seed.read())
else:
decoy_seed = randint(10000, 10000000000000000)
try:
with open(decoy_seed_filename, 'w+') as fp_seed:
with open(decoy_seed_filename, 'w+',
encoding='utf-8') as fp_seed:
fp_seed.write(str(decoy_seed))
except OSError:
print('EX: unable to write ' + decoy_seed_filename)

View File

@ -34,6 +34,7 @@ from utils import get_status_number
from utils import clear_from_post_caches
from utils import dangerous_markup
from utils import local_actor_url
from utils import text_in_file
from inbox import store_hash_tags
from session import create_session
@ -46,7 +47,7 @@ def _update_feeds_outbox_index(base_dir: str, domain: str,
index_filename = base_path + '/outbox.index'
if os.path.isfile(index_filename):
if post_id not in open(index_filename, encoding='utf-8').read():
if not text_in_file(post_id, index_filename):
try:
with open(index_filename, 'r+') as feeds_file:
content = feeds_file.read()
@ -813,7 +814,7 @@ def run_newswire_daemon(base_dir: str, httpd,
print('Updating newswire feeds')
new_newswire = \
get_dict_from_newswire(httpd.session, base_dir, domain,
httpd.max_newswire_postsPerSource,
httpd.max_newswire_posts_per_source,
httpd.max_newswire_feed_size_kb,
httpd.maxTags,
httpd.max_feed_item_size_kb,

View File

@ -10,6 +10,7 @@ __module_group__ = "Calendar"
import os
from utils import remove_domain_port
from utils import acct_dir
from utils import text_in_file
def _notify_on_post_arrival(base_dir: str, nickname: str, domain: str,
@ -30,8 +31,7 @@ def _notify_on_post_arrival(base_dir: str, nickname: str, domain: str,
handle = following_nickname + '@' + following_domain
# check that you are following this handle
if handle + '\n' not in open(following_filename,
encoding='utf-8').read():
if not text_in_file(handle + '\n', following_filename):
print('WARN: ' + handle + ' is not in ' + following_filename)
return
@ -113,5 +113,4 @@ def notify_when_person_posts(base_dir: str, nickname: str, domain: str,
with open(notify_on_post_filename, 'w+',
encoding='utf-8') as fp_notify:
fp_notify.write('')
return handle + '\n' in open(notify_on_post_filename,
encoding='utf-8').read()
return text_in_file(handle + '\n', notify_on_post_filename)

View File

@ -62,6 +62,7 @@ from utils import get_user_paths
from utils import get_group_paths
from utils import local_actor_url
from utils import dangerous_svg
from utils import text_in_file
from session import create_session
from session import get_json
from webfinger import webfinger_handle
@ -521,7 +522,7 @@ def _create_person_base(base_dir: str, nickname: str, domain: str, port: int,
os.mkdir(base_dir + private_keys_subdir)
filename = base_dir + private_keys_subdir + '/' + handle + '.key'
try:
with open(filename, 'w+') as text_file:
with open(filename, 'w+', encoding='utf-8') as text_file:
print(private_key_pem, file=text_file)
except OSError:
print('EX: unable to save ' + filename)
@ -532,7 +533,7 @@ def _create_person_base(base_dir: str, nickname: str, domain: str, port: int,
os.mkdir(base_dir + public_keys_subdir)
filename = base_dir + public_keys_subdir + '/' + handle + '.pem'
try:
with open(filename, 'w+') as text_file:
with open(filename, 'w+', encoding='utf-8') as text_file:
print(public_key_pem, file=text_file)
except OSError:
print('EX: unable to save 2 ' + filename)
@ -1207,7 +1208,7 @@ def _remove_tags_for_nickname(base_dir: str, nickname: str,
continue
if not os.path.isfile(tag_filename):
continue
if match_str not in open(tag_filename, encoding='utf-8').read():
if not text_in_file(match_str, tag_filename):
continue
lines = []
with open(tag_filename, 'r', encoding='utf-8') as fp_tag:
@ -1358,8 +1359,7 @@ def is_person_snoozed(base_dir: str, nickname: str, domain: str,
snoozed_filename = acct_dir(base_dir, nickname, domain) + '/snoozed.txt'
if not os.path.isfile(snoozed_filename):
return False
if snooze_actor + ' ' not in open(snoozed_filename,
encoding='utf-8').read():
if not text_in_file(snooze_actor + ' ', snoozed_filename):
return False
# remove the snooze entry if it has timed out
replace_str = None
@ -1391,7 +1391,7 @@ def is_person_snoozed(base_dir: str, nickname: str, domain: str,
except OSError:
print('EX: unable to write ' + snoozed_filename)
if snooze_actor + ' ' in open(snoozed_filename, encoding='utf-8').read():
if text_in_file(snooze_actor + ' ', snoozed_filename):
return True
return False
@ -1406,8 +1406,7 @@ def person_snooze(base_dir: str, nickname: str, domain: str,
return
snoozed_filename = account_dir + '/snoozed.txt'
if os.path.isfile(snoozed_filename):
if snooze_actor + ' ' in open(snoozed_filename,
encoding='utf-8').read():
if text_in_file(snooze_actor + ' ', snoozed_filename):
return
try:
with open(snoozed_filename, 'a+', encoding='utf-8') as snoozed_file:
@ -1428,8 +1427,7 @@ def person_unsnooze(base_dir: str, nickname: str, domain: str,
snoozed_filename = account_dir + '/snoozed.txt'
if not os.path.isfile(snoozed_filename):
return
if snooze_actor + ' ' not in open(snoozed_filename,
encoding='utf-8').read():
if not text_in_file(snooze_actor + ' ', snoozed_filename):
return
replace_str = None
with open(snoozed_filename, 'r', encoding='utf-8') as snoozed_file:

View File

@ -32,6 +32,7 @@ from webfinger import webfinger_handle
from httpsig import create_signed_header
from siteactive import site_is_active
from languages import understood_post_language
from utils import text_in_file
from utils import get_media_descriptions_from_post
from utils import valid_hash_tag
from utils import get_audio_extensions
@ -968,7 +969,7 @@ def _update_hashtags_index(base_dir: str, tag: {}, new_post_id: str) -> None:
tags_filename)
else:
# prepend to tags index file
if tagline not in open(tags_filename, encoding='utf-8').read():
if not text_in_file(tagline, tags_filename):
try:
with open(tags_filename, 'r+', encoding='utf-8') as tags_file:
content = tags_file.read()
@ -990,8 +991,7 @@ def _add_schedule_post(base_dir: str, nickname: str, domain: str,
index_str = event_date_str + ' ' + post_id.replace('/', '#')
if os.path.isfile(schedule_index_filename):
if index_str not in open(schedule_index_filename,
encoding='utf-8').read():
if not text_in_file(index_str, schedule_index_filename):
try:
with open(schedule_index_filename, 'r+',
encoding='utf-8') as schedule_file:
@ -4294,7 +4294,12 @@ def archive_posts_for_person(http_prefix: str, nickname: str, domain: str,
# Time of file creation
full_filename = os.path.join(box_dir, post_filename)
if os.path.isfile(full_filename):
content = open(full_filename, encoding='utf-8').read()
content = ''
try:
with open(full_filename, 'r', encoding='utf-8') as fp_content:
content = fp_content.read()
except OSError:
print('EX: unable to open content ' + full_filename)
if '"published":' in content:
published_str = content.split('"published":')[1]
if '"' in published_str:
@ -4751,8 +4756,7 @@ def populate_replies_json(base_dir: str, nickname: str, domain: str,
message_id2.replace('/', '#') + '.json'
if os.path.isfile(search_filename):
if authorized or \
pub_str in open(search_filename,
encoding='utf-8').read():
text_in_file(pub_str, search_filename):
post_json_object = load_json(search_filename)
if post_json_object:
if post_json_object['object'].get('cc'):
@ -4779,8 +4783,7 @@ def populate_replies_json(base_dir: str, nickname: str, domain: str,
message_id2.replace('/', '#') + '.json'
if os.path.isfile(search_filename):
if authorized or \
pub_str in open(search_filename,
encoding='utf-8').read():
text_in_file(pub_str, search_filename):
# get the json of the reply and append it to
# the collection
post_json_object = load_json(search_filename)

View File

@ -12,6 +12,7 @@ from utils import locate_post
from utils import load_json
from utils import save_json
from utils import has_object_dict
from utils import text_in_file
def question_update_votes(base_dir: str, nickname: str, domain: str,
@ -74,8 +75,7 @@ def question_update_votes(base_dir: str, nickname: str, domain: str,
except OSError:
print('EX: unable to write voters file ' + voters_filename)
else:
if reply_json['actor'] not in open(voters_filename,
encoding='utf-8').read():
if not text_in_file(reply_json['actor'], voters_filename):
# append to the voters file
try:
with open(voters_filename, 'a+',

View File

@ -13,6 +13,7 @@ from utils import save_json
from utils import get_status_number
from utils import remove_domain_port
from utils import acct_dir
from utils import text_in_file
def _clear_role_status(base_dir: str, role: str) -> None:
@ -28,8 +29,7 @@ def _clear_role_status(base_dir: str, role: str) -> None:
if not filename.endswith(".json"):
continue
filename = os.path.join(base_dir + '/accounts/', filename)
if '"' + role + '"' not in open(filename,
encoding='utf-8').read():
if not text_in_file('"' + role + '"', filename):
continue
actor_json = load_json(filename)
if not actor_json:

154
tests.py
View File

@ -54,6 +54,7 @@ from follow import clear_followers
from follow import send_follow_request_via_server
from follow import send_unfollow_request_via_server
from siteactive import site_is_active
from utils import text_in_file
from utils import convert_published_to_local_timezone
from utils import convert_to_snake_case
from utils import get_sha_256
@ -1418,7 +1419,7 @@ def test_post_message_between_servers(base_dir: str) -> None:
bob_domain, None, None)
for _ in range(20):
if 'likes' in open(outbox_post_filename, encoding='utf-8').read():
if text_in_file('likes', outbox_post_filename):
break
time.sleep(1)
@ -1426,7 +1427,7 @@ def test_post_message_between_servers(base_dir: str) -> None:
if alice_post_json:
pprint(alice_post_json)
assert 'likes' in open(outbox_post_filename, encoding='utf-8').read()
assert text_in_file('likes', outbox_post_filename)
print('\n\n*******************************************************')
print("Bob reacts to Alice's post")
@ -1441,7 +1442,7 @@ def test_post_message_between_servers(base_dir: str) -> None:
bob_domain, None, None)
for _ in range(20):
if 'reactions' in open(outbox_post_filename, encoding='utf-8').read():
if text_in_file('reactions', outbox_post_filename):
break
time.sleep(1)
@ -1450,7 +1451,7 @@ def test_post_message_between_servers(base_dir: str) -> None:
pprint(alice_post_json)
# TODO: fix reactions unit test
# assert 'reactions' in open(outbox_post_filename, encoding='utf-8').read()
# assert text_in_file('reactions', outbox_post_filename)
print('\n\n*******************************************************')
print("Bob repeats Alice's post")
@ -1637,17 +1638,14 @@ def test_follow_between_servers(base_dir: str) -> None:
assert valid_inbox(bob_dir, 'bob', bob_domain)
assert valid_inbox_filenames(bob_dir, 'bob', bob_domain,
alice_domain, alice_port)
assert 'alice@' + alice_domain in open(bob_dir + '/accounts/bob@' +
bob_domain +
'/followers.txt',
encoding='utf-8').read()
assert 'bob@' + bob_domain in open(alice_dir + '/accounts/alice@' +
alice_domain + '/following.txt',
encoding='utf-8').read()
assert 'bob@' + bob_domain in open(alice_dir + '/accounts/alice@' +
alice_domain +
'/followingCalendar.txt',
encoding='utf-8').read()
assert text_in_file('alice@' + alice_domain, bob_dir + '/accounts/bob@' +
bob_domain + '/followers.txt')
assert text_in_file('bob@' + bob_domain,
alice_dir + '/accounts/alice@' +
alice_domain + '/following.txt')
assert text_in_file('bob@' + bob_domain,
alice_dir + '/accounts/alice@' +
alice_domain + '/followingCalendar.txt')
assert not is_group_actor(alice_dir, bob_actor, alice_person_cache)
assert not is_group_account(alice_dir, 'alice', alice_domain)
@ -1861,17 +1859,15 @@ def test_shared_items_federation(base_dir: str) -> None:
assert valid_inbox(bob_dir, 'bob', bob_domain)
assert valid_inbox_filenames(bob_dir, 'bob', bob_domain,
alice_domain, alice_port)
assert 'alice@' + alice_domain in open(bob_dir + '/accounts/bob@' +
bob_domain +
'/followers.txt',
encoding='utf-8').read()
assert 'bob@' + bob_domain in open(alice_dir + '/accounts/alice@' +
alice_domain + '/following.txt',
encoding='utf-8').read()
assert 'bob@' + bob_domain in open(alice_dir + '/accounts/alice@' +
alice_domain +
'/followingCalendar.txt',
encoding='utf-8').read()
assert text_in_file('alice@' + alice_domain,
bob_dir + '/accounts/bob@' +
bob_domain + '/followers.txt')
assert text_in_file('bob@' + bob_domain,
alice_dir + '/accounts/alice@' +
alice_domain + '/following.txt')
assert text_in_file('bob@' + bob_domain,
alice_dir + '/accounts/alice@' +
alice_domain + '/followingCalendar.txt')
assert not is_group_actor(alice_dir, bob_actor, alice_person_cache)
assert not is_group_account(bob_dir, 'bob', bob_domain)
@ -2322,17 +2318,15 @@ def test_group_follow(base_dir: str) -> None:
assert valid_inbox(testgroup_dir, 'testgroup', testgroup_domain)
assert valid_inbox_filenames(testgroup_dir, 'testgroup', testgroup_domain,
alice_domain, alice_port)
assert 'alice@' + alice_domain in open(testgroup_followers_filename,
encoding='utf-8').read()
assert '!alice@' + alice_domain not in \
open(testgroup_followers_filename, encoding='utf-8').read()
assert text_in_file('alice@' + alice_domain, testgroup_followers_filename)
assert not text_in_file('!alice@' + alice_domain,
testgroup_followers_filename)
testgroup_webfinger_filename = \
testgroup_dir + '/wfendpoints/testgroup@' + \
testgroup_domain + ':' + str(testgroupPort) + '.json'
assert os.path.isfile(testgroup_webfinger_filename)
assert 'acct:testgroup@' in open(testgroup_webfinger_filename,
encoding='utf-8').read()
assert text_in_file('acct:testgroup@', testgroup_webfinger_filename)
print('acct: exists within the webfinger endpoint for testgroup')
testgroup_handle = 'testgroup@' + testgroup_domain
@ -2347,10 +2341,8 @@ def test_group_follow(base_dir: str) -> None:
assert not is_group_account(alice_dir, 'alice', alice_domain)
assert is_group_account(testgroup_dir, 'testgroup', testgroup_domain)
assert '!testgroup' in following_str
assert testgroup_handle in open(alice_following_filename,
encoding='utf-8').read()
assert testgroup_handle in open(alice_following_calendar_filename,
encoding='utf-8').read()
assert text_in_file(testgroup_handle, alice_following_filename)
assert text_in_file(testgroup_handle, alice_following_calendar_filename)
print('\n\n*********************************************************')
print('Alice follows the test group')
@ -2404,17 +2396,15 @@ def test_group_follow(base_dir: str) -> None:
assert valid_inbox(testgroup_dir, 'testgroup', testgroup_domain)
assert valid_inbox_filenames(testgroup_dir, 'testgroup', testgroup_domain,
bob_domain, bob_port)
assert 'bob@' + bob_domain in open(testgroup_followers_filename,
encoding='utf-8').read()
assert '!bob@' + bob_domain not in \
open(testgroup_followers_filename, encoding='utf-8').read()
assert text_in_file('bob@' + bob_domain, testgroup_followers_filename)
assert not text_in_file('!bob@' + bob_domain,
testgroup_followers_filename)
testgroup_webfinger_filename = \
testgroup_dir + '/wfendpoints/testgroup@' + \
testgroup_domain + ':' + str(testgroupPort) + '.json'
assert os.path.isfile(testgroup_webfinger_filename)
assert 'acct:testgroup@' in open(testgroup_webfinger_filename,
encoding='utf-8').read()
assert text_in_file('acct:testgroup@', testgroup_webfinger_filename)
print('acct: exists within the webfinger endpoint for testgroup')
testgroup_handle = 'testgroup@' + testgroup_domain
@ -2427,10 +2417,8 @@ def test_group_follow(base_dir: str) -> None:
testgroup_domain + ':' + str(testgroupPort))
assert is_group_actor(bob_dir, testgroup_actor, bob_person_cache)
assert '!testgroup' in following_str
assert testgroup_handle in open(bob_following_filename,
encoding='utf-8').read()
assert testgroup_handle in open(bob_following_calendar_filename,
encoding='utf-8').read()
assert text_in_file(testgroup_handle, bob_following_filename)
assert text_in_file(testgroup_handle, bob_following_calendar_filename)
print('Bob follows the test group')
print('\n\n*********************************************************')
@ -3187,30 +3175,27 @@ def test_client_to_server(base_dir: str):
bob_dir + '/accounts/bob@' + bob_domain + '/followers.txt'
for _ in range(10):
if os.path.isfile(bob_followers_filename):
if 'alice@' + alice_domain + ':' + str(alice_port) in \
open(bob_followers_filename,
encoding='utf-8').read():
test_str = 'alice@' + alice_domain + ':' + str(alice_port)
if text_in_file(test_str, bob_followers_filename):
if os.path.isfile(alice_following_filename) and \
os.path.isfile(alice_petnames_filename):
if 'bob@' + bob_domain + ':' + str(bob_port) in \
open(alice_following_filename,
encoding='utf-8').read():
test_str = 'bob@' + bob_domain + ':' + str(bob_port)
if text_in_file(test_str, alice_following_filename):
break
time.sleep(1)
assert os.path.isfile(bob_followers_filename)
assert os.path.isfile(alice_following_filename)
assert os.path.isfile(alice_petnames_filename)
assert 'bob bob@' + bob_domain in \
open(alice_petnames_filename, encoding='utf-8').read()
assert text_in_file('bob bob@' + bob_domain, alice_petnames_filename)
print('alice@' + alice_domain + ':' + str(alice_port) + ' in ' +
bob_followers_filename)
assert 'alice@' + alice_domain + ':' + str(alice_port) in \
open(bob_followers_filename, encoding='utf-8').read()
test_str = 'alice@' + alice_domain + ':' + str(alice_port)
assert text_in_file(test_str, bob_followers_filename)
print('bob@' + bob_domain + ':' + str(bob_port) + ' in ' +
alice_following_filename)
assert 'bob@' + bob_domain + ':' + str(bob_port) in \
open(alice_following_filename, encoding='utf-8').read()
test_str = 'bob@' + bob_domain + ':' + str(bob_port)
assert text_in_file(test_str, alice_following_filename)
assert valid_inbox(bob_dir, 'bob', bob_domain)
assert valid_inbox_filenames(bob_dir, 'bob', bob_domain,
alice_domain, alice_port)
@ -3226,23 +3211,25 @@ def test_client_to_server(base_dir: str):
for _ in range(10):
if os.path.isfile(alice_dir + '/accounts/alice@' + alice_domain +
'/followers.txt'):
if 'bob@' + bob_domain + ':' + str(bob_port) in \
open(alice_dir + '/accounts/alice@' + alice_domain +
'/followers.txt', encoding='utf-8').read():
test_str = 'bob@' + bob_domain + ':' + str(bob_port)
test_filename = \
alice_dir + '/accounts/alice@' + \
alice_domain + '/followers.txt'
if text_in_file(test_str, test_filename):
if os.path.isfile(bob_dir + '/accounts/bob@' + bob_domain +
'/following.txt'):
alice_handle_str = \
'alice@' + alice_domain + ':' + str(alice_port)
if alice_handle_str in \
open(bob_dir + '/accounts/bob@' + bob_domain +
'/following.txt', encoding='utf-8').read():
if text_in_file(alice_handle_str,
bob_dir + '/accounts/bob@' + bob_domain +
'/following.txt'):
if os.path.isfile(bob_dir + '/accounts/bob@' +
bob_domain +
'/followingCalendar.txt'):
if alice_handle_str in \
open(bob_dir + '/accounts/bob@' + bob_domain +
'/followingCalendar.txt',
encoding='utf-8').read():
if text_in_file(alice_handle_str,
bob_dir + '/accounts/bob@' +
bob_domain +
'/followingCalendar.txt'):
break
time.sleep(1)
@ -3250,12 +3237,13 @@ def test_client_to_server(base_dir: str):
'/followers.txt')
assert os.path.isfile(bob_dir + '/accounts/bob@' + bob_domain +
'/following.txt')
assert 'bob@' + bob_domain + ':' + str(bob_port) in \
open(alice_dir + '/accounts/alice@' + alice_domain +
'/followers.txt', encoding='utf-8').read()
assert 'alice@' + alice_domain + ':' + str(alice_port) in \
open(bob_dir + '/accounts/bob@' + bob_domain + '/following.txt',
encoding='utf-8').read()
test_str = 'bob@' + bob_domain + ':' + str(bob_port)
assert text_in_file(test_str, alice_dir + '/accounts/alice@' +
alice_domain + '/followers.txt')
test_str = 'alice@' + alice_domain + ':' + str(alice_port)
assert text_in_file(test_str,
bob_dir + '/accounts/bob@' +
bob_domain + '/following.txt')
session_bob = create_session(proxy_type)
password = 'bobpass'
@ -3458,19 +3446,19 @@ def test_client_to_server(base_dir: str):
cached_webfingers, person_cache,
True, __version__, signing_priv_key_pem)
for _ in range(10):
if 'alice@' + alice_domain + ':' + str(alice_port) not in \
open(bob_followers_filename, encoding='utf-8').read():
if 'bob@' + bob_domain + ':' + str(bob_port) not in \
open(alice_following_filename, encoding='utf-8').read():
test_str = 'alice@' + alice_domain + ':' + str(alice_port)
if not text_in_file(test_str, bob_followers_filename):
test_str = 'bob@' + bob_domain + ':' + str(bob_port)
if not text_in_file(test_str, alice_following_filename):
break
time.sleep(1)
assert os.path.isfile(bob_followers_filename)
assert os.path.isfile(alice_following_filename)
assert 'alice@' + alice_domain + ':' + str(alice_port) \
not in open(bob_followers_filename, encoding='utf-8').read()
assert 'bob@' + bob_domain + ':' + str(bob_port) \
not in open(alice_following_filename, encoding='utf-8').read()
test_str = 'alice@' + alice_domain + ':' + str(alice_port)
assert not text_in_file(test_str, bob_followers_filename)
test_str = 'bob@' + bob_domain + ':' + str(bob_port)
assert not text_in_file(test_str, alice_following_filename)
assert valid_inbox(bob_dir, 'bob', bob_domain)
assert valid_inbox_filenames(bob_dir, 'bob', bob_domain,
alice_domain, alice_port)
@ -7218,7 +7206,7 @@ def _test_diff_content() -> None:
timezone, system_language)
assert html_str
expected = \
'<details><summary class="cw">SHOW EDITS</summary>' + \
'<details><summary class="cw" tabindex="10">SHOW EDITS</summary>' + \
'<p><b>Mon Dec 14, 01:07</b></p><p><label class="diff_add">' + \
'+ This is some content</label><br><label class="diff_remove">' + \
'- This is some previous content</label><br>' + \

View File

@ -17,6 +17,7 @@ from utils import acct_dir
from utils import dangerous_svg
from utils import local_actor_url
from utils import remove_html
from utils import text_in_file
from shutil import copyfile
from shutil import make_archive
from shutil import unpack_archive
@ -65,8 +66,8 @@ def import_theme(base_dir: str, filename: str) -> bool:
# if the theme name in the default themes list?
default_themes_filename = base_dir + '/defaultthemes.txt'
if os.path.isfile(default_themes_filename):
if new_theme_name.title() + '\n' in \
open(default_themes_filename, encoding='utf-8').read():
test_str = new_theme_name.title() + '\n'
if text_in_file(test_str, default_themes_filename):
new_theme_name = new_theme_name + '2'
theme_dir = base_dir + '/theme/' + new_theme_name

View File

@ -40,6 +40,25 @@ INVALID_CHARACTERS = (
)
def text_in_file(text: str, filename: str,
                 case_sensitive: bool = True) -> bool:
    """Returns True if the given text occurs within the given file.

    text: the substring to search for
    filename: path of the file to search
    case_sensitive: when False, both the search text and the file
    contents are lowercased before comparison
    Returns False when the file cannot be read (e.g. it is missing).
    """
    search_str = text if case_sensitive else text.lower()
    try:
        with open(filename, 'r', encoding='utf-8') as fp_text:
            file_text = fp_text.read()
    except OSError:
        # treat an unreadable/missing file as "text not found"
        print('EX: unable to find text in missing file ' + filename)
        return False
    if not file_text:
        return False
    if not case_sensitive:
        file_text = file_text.lower()
    return search_str in file_text
def local_actor_url(http_prefix: str, nickname: str, domain_full: str) -> str:
"""Returns the url for an actor on this instance
"""
@ -109,32 +128,32 @@ def has_object_dict(post_json_object: {}) -> bool:
def get_content_from_post(post_json_object: {}, system_language: str,
languages_understood: [],
contentType: str = "content") -> str:
content_type: str = "content") -> str:
"""Returns the content from the post in the given language
including searching for a matching entry within contentMap
"""
this_post_json = post_json_object
if has_object_dict(post_json_object):
this_post_json = post_json_object['object']
if not this_post_json.get(contentType):
if not this_post_json.get(content_type):
return ''
content = ''
mapDict = contentType + 'Map'
if this_post_json.get(mapDict):
if isinstance(this_post_json[mapDict], dict):
if this_post_json[mapDict].get(system_language):
sys_lang = this_post_json[mapDict][system_language]
map_dict = content_type + 'Map'
if this_post_json.get(map_dict):
if isinstance(this_post_json[map_dict], dict):
if this_post_json[map_dict].get(system_language):
sys_lang = this_post_json[map_dict][system_language]
if isinstance(sys_lang, str):
return this_post_json[mapDict][system_language]
return this_post_json[map_dict][system_language]
else:
# is there a contentMap/summaryMap entry for one of
# the understood languages?
for lang in languages_understood:
if this_post_json[mapDict].get(lang):
return this_post_json[mapDict][lang]
if this_post_json[map_dict].get(lang):
return this_post_json[map_dict][lang]
else:
if isinstance(this_post_json[contentType], str):
content = this_post_json[contentType]
if isinstance(this_post_json[content_type], str):
content = this_post_json[content_type]
return content
@ -1320,8 +1339,7 @@ def follow_person(base_dir: str, nickname: str, domain: str,
# was this person previously unfollowed?
unfollowed_filename = base_dir + '/accounts/' + handle + '/unfollowed.txt'
if os.path.isfile(unfollowed_filename):
if handle_to_follow in open(unfollowed_filename,
encoding='utf-8').read():
if text_in_file(handle_to_follow, unfollowed_filename):
# remove them from the unfollowed file
new_lines = ''
with open(unfollowed_filename, 'r',
@ -1341,7 +1359,7 @@ def follow_person(base_dir: str, nickname: str, domain: str,
handle_to_follow = '!' + handle_to_follow
filename = base_dir + '/accounts/' + handle + '/' + follow_file
if os.path.isfile(filename):
if handle_to_follow in open(filename, encoding='utf-8').read():
if text_in_file(handle_to_follow, filename):
if debug:
print('DEBUG: follow already exists')
return True
@ -1648,7 +1666,7 @@ def remove_moderation_post_from_index(base_dir: str, post_url: str,
if not os.path.isfile(moderation_index_file):
return
post_id = remove_id_ending(post_url)
if post_id in open(moderation_index_file, encoding='utf-8').read():
if text_in_file(post_id, moderation_index_file):
with open(moderation_index_file, 'r',
encoding='utf-8') as file1:
lines = file1.readlines()
@ -1679,7 +1697,7 @@ def _is_reply_to_blog_post(base_dir: str, nickname: str, domain: str,
return False
post_id = remove_id_ending(post_json_object['object']['inReplyTo'])
post_id = post_id.replace('/', '#')
if post_id in open(blogs_index_filename, encoding='utf-8').read():
if text_in_file(post_id, blogs_index_filename):
return True
return False
@ -1720,8 +1738,7 @@ def _is_bookmarked(base_dir: str, nickname: str, domain: str,
acct_dir(base_dir, nickname, domain) + '/bookmarks.index'
if os.path.isfile(bookmarks_index_filename):
bookmark_index = post_filename.split('/')[-1] + '\n'
if bookmark_index in open(bookmarks_index_filename,
encoding='utf-8').read():
if text_in_file(bookmark_index, bookmarks_index_filename):
return True
return False
@ -3024,8 +3041,7 @@ def dm_allowed_from_domain(base_dir: str,
acct_dir(base_dir, nickname, domain) + '/dmAllowedInstances.txt'
if not os.path.isfile(dm_allowed_instances_file):
return False
if sending_actor_domain + '\n' in open(dm_allowed_instances_file,
encoding='utf-8').read():
if text_in_file(sending_actor_domain + '\n', dm_allowed_instances_file):
return True
return False
@ -3339,8 +3355,7 @@ def is_group_actor(base_dir: str, actor: str, person_cache: {},
if debug:
print('Cached actor file not found ' + cached_actor_filename)
return False
if '"type": "Group"' in open(cached_actor_filename,
encoding='utf-8').read():
if text_in_file('"type": "Group"', cached_actor_filename):
if debug:
print('Group type found in ' + cached_actor_filename)
return True
@ -3353,8 +3368,7 @@ def is_group_account(base_dir: str, nickname: str, domain: str) -> bool:
account_filename = acct_dir(base_dir, nickname, domain) + '.json'
if not os.path.isfile(account_filename):
return False
if '"type": "Group"' in open(account_filename,
encoding='utf-8').read():
if text_in_file('"type": "Group"', account_filename):
return True
return False

View File

@ -487,7 +487,7 @@ def html_edit_links(css_cache: {}, translate: {}, base_dir: str, path: str,
links_filename = base_dir + '/accounts/links.txt'
links_str = ''
if os.path.isfile(links_filename):
with open(links_filename, 'r') as fp_links:
with open(links_filename, 'r', encoding='utf-8') as fp_links:
links_str = fp_links.read()
edit_links_form += \
@ -512,7 +512,7 @@ def html_edit_links(css_cache: {}, translate: {}, base_dir: str, path: str,
about_filename = base_dir + '/accounts/about.md'
about_str = ''
if os.path.isfile(about_filename):
with open(about_filename, 'r') as fp_about:
with open(about_filename, 'r', encoding='utf-8') as fp_about:
about_str = fp_about.read()
edit_links_form += \
@ -531,7 +531,7 @@ def html_edit_links(css_cache: {}, translate: {}, base_dir: str, path: str,
tos_filename = base_dir + '/accounts/tos.md'
tos_str = ''
if os.path.isfile(tos_filename):
with open(tos_filename, 'r') as fp_tos:
with open(tos_filename, 'r', encoding='utf-8') as fp_tos:
tos_str = fp_tos.read()
edit_links_form += \

View File

@ -279,9 +279,9 @@ def _add_embedded_audio(translate: {}, content: str) -> str:
continue
content += \
'<center>\n<span itemprop="audio">' + \
'<audio controls>\n' + \
'<audio controls tabindex="10">\n' + \
'<source src="' + wrd + '" type="audio/' + \
extension.replace('.', '') + '" tabindex="10">' + \
extension.replace('.', '') + '">' + \
translate['Your browser does not support the audio element.'] + \
'</audio>\n</span>\n</center>\n'
return content
@ -324,9 +324,9 @@ def _add_embedded_video(translate: {}, content: str) -> str:
'<figure id="videoContainer" ' + \
'data-fullscreen="false">\n' + \
' <video id="video" controls ' + \
'preload="metadata">\n' + \
'preload="metadata" tabindex="10">\n' + \
'<source src="' + wrd + '" type="video/' + \
extension.replace('.', '') + '" tabindex="10">\n' + \
extension.replace('.', '') + '">\n' + \
translate['Your browser does not support the video element.'] + \
'</video>\n</figure>\n</span>\n</center>\n'
return content

View File

@ -278,7 +278,7 @@ def _html_podcast_soundbites(link_url: str, extension: str,
soundbite_title += ' ' + str(ctr)
podcast_str += \
' <span itemprop="trailer">\n' + \
' <audio controls>\n' + \
' <audio controls tabindex="10">\n' + \
' <p>' + soundbite_title + '</p>\n' + \
' <source src="' + preview_url + '" type="audio/' + \
extension.replace('.', '') + '">' + \
@ -372,7 +372,7 @@ def html_podcast_episode(css_cache: {}, translate: {},
# podcast player widget
podcast_str += \
' <span itemprop="audio">\n' + \
' <audio controls>\n' + \
' <audio controls tabindex="10">\n' + \
' <source src="' + link_url + '" type="audio/' + \
audio_extension.replace('.', '') + '">' + \
translate['Your browser does not support the audio element.'] + \
@ -397,7 +397,8 @@ def html_podcast_episode(css_cache: {}, translate: {},
' <span itemprop="video">\n' + \
' <figure id="videoContainer" ' + \
'data-fullscreen="false">\n' + \
' <video id="video" controls preload="metadata">\n' + \
' <video id="video" controls preload="metadata" ' + \
'tabindex="10">\n' + \
'<source src="' + link_url + '" ' + \
'type="' + video_mime_type + '">' + \
translate[video_msg] + \

View File

@ -11,6 +11,7 @@ import os
from question import is_question
from utils import remove_id_ending
from utils import acct_dir
from utils import text_in_file
def insert_question(base_dir: str, translate: {},
@ -34,7 +35,7 @@ def insert_question(base_dir: str, translate: {},
show_question_results = False
if os.path.isfile(votes_filename):
if message_id in open(votes_filename, encoding='utf-8').read():
if text_in_file(message_id, votes_filename):
show_question_results = True
if not show_question_results:

View File

@ -26,6 +26,7 @@ from utils import get_audio_extensions
from utils import get_video_extensions
from utils import get_image_extensions
from utils import local_actor_url
from utils import text_in_file
from cache import store_person_in_cache
from content import add_html_tags
from content import replace_emoji_from_tags
@ -362,7 +363,7 @@ def scheduled_posts_exist(base_dir: str, nickname: str, domain: str) -> bool:
acct_dir(base_dir, nickname, domain) + '/schedule.index'
if not os.path.isfile(schedule_index_filename):
return False
if '#users#' in open(schedule_index_filename, encoding='utf-8').read():
if text_in_file('#users#', schedule_index_filename):
return True
return False
@ -1205,7 +1206,7 @@ def get_post_attachments_as_html(base_dir: str, domain_full: str,
' <figure id="videoContainer" ' + \
'data-fullscreen="false">\n' + \
' <video id="video" controls ' + \
'preload="metadata">\n'
'preload="metadata" tabindex="10">\n'
gallery_str += \
' <source src="' + attach['url'] + \
'" alt="' + image_description + \
@ -1244,7 +1245,7 @@ def get_post_attachments_as_html(base_dir: str, domain_full: str,
'<center><figure id="videoContainer" ' + \
'data-fullscreen="false">\n' + \
' <video id="video" controls ' + \
'preload="metadata">\n'
'preload="metadata" tabindex="10">\n'
attachment_str += \
'<source src="' + attach['url'] + '" alt="' + \
image_description + '" title="' + image_description + \
@ -1271,7 +1272,7 @@ def get_post_attachments_as_html(base_dir: str, domain_full: str,
gallery_str += \
' <a href="' + attach['url'] + \
'" tabindex="10">\n'
gallery_str += ' <audio controls>\n'
gallery_str += ' <audio controls tabindex="10">\n'
gallery_str += \
' <source src="' + attach['url'] + \
'" alt="' + image_description + \
@ -1306,7 +1307,7 @@ def get_post_attachments_as_html(base_dir: str, domain_full: str,
gallery_str += ' </div>\n'
gallery_str += '</div>\n'
attachment_str += '<center>\n<audio controls>\n'
attachment_str += '<center>\n<audio controls tabindex="10">\n'
attachment_str += \
'<source src="' + attach['url'] + '" alt="' + \
image_description + '" title="' + image_description + \