Function for accounts data directory

main
Bob Mottram 2024-05-12 13:35:26 +01:00
parent c8cf6d4a70
commit f20eaefa94
67 changed files with 531 additions and 396 deletions
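The matching utils.py hunk is not shown in this excerpt. Judging from the private _data_dir2() copy that appears further down, the new helper is presumably a one-liner along these lines (a sketch, not the verbatim utils.py code):

def data_dir(base_dir: str) -> str:
    """Returns the directory where account data is stored
    """
    return base_dir + '/accounts'

Call sites then switch from base_dir + '/accounts/...' to data_dir(base_dir) + '/...', so the storage location only has to be changed in one place.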

auth.py

@ -12,6 +12,7 @@ import hashlib
import binascii
import os
import secrets
from utils import data_dir
from utils import is_system_account
from utils import is_memorial_account
from utils import has_users_path
@ -143,7 +144,7 @@ def authorize_basic(base_dir: str, path: str, auth_header: str,
print('basic auth - attempted login using memorial account ' +
nickname + ' in Auth header')
return False
password_file = base_dir + '/accounts/passwords'
password_file = data_dir(base_dir) + '/passwords'
if not os.path.isfile(password_file):
if debug:
print('DEBUG: passwords file missing')
@ -178,10 +179,11 @@ def store_basic_credentials(base_dir: str,
nickname = remove_eol(nickname).strip()
password = remove_eol(password).strip()
if not os.path.isdir(base_dir + '/accounts'):
os.mkdir(base_dir + '/accounts')
dir_str = data_dir(base_dir)
if not os.path.isdir(dir_str):
os.mkdir(dir_str)
password_file = base_dir + '/accounts/passwords'
password_file = dir_str + '/passwords'
store_str = nickname + ':' + _hash_password(password)
if os.path.isfile(password_file):
if text_in_file(nickname + ':', password_file):
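As store_str above shows, the passwords file holds one nickname:hash entry per line. A minimal sketch of the duplicate-entry check being made here; _password_entry_exists is a hypothetical name, and the real code uses text_in_file from utils:

import os
from utils import data_dir  # helper assumed to be introduced by this commit

def _password_entry_exists(base_dir: str, nickname: str) -> bool:
    """Hypothetical helper: roughly mirrors the text_in_file(nickname + ':', ...)
    check, returning True if the passwords file already has an entry for nickname
    """
    password_file = data_dir(base_dir) + '/passwords'
    if not os.path.isfile(password_file):
        return False
    with open(password_file, 'r', encoding='utf-8') as fp_pass:
        return any(line.startswith(nickname + ':') for line in fp_pass)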
@ -226,7 +228,7 @@ def remove_password(base_dir: str, nickname: str) -> None:
"""Removes the password entry for the given nickname
This is called during account removal
"""
password_file = base_dir + '/accounts/passwords'
password_file = data_dir(base_dir) + '/passwords'
if os.path.isfile(password_file):
try:
with open(password_file, 'r', encoding='utf-8') as fin:
@ -291,7 +293,7 @@ def record_login_failure(base_dir: str, ip_address: str,
if not log_to_file:
return
failure_log = base_dir + '/accounts/loginfailures.log'
failure_log = data_dir(base_dir) + '/loginfailures.log'
write_type = 'a+'
if not os.path.isfile(failure_log):
write_type = 'w+'


@ -12,6 +12,7 @@ import json
import time
from session import get_json_valid
from session import create_session
from utils import data_dir
from utils import string_contains
from utils import date_from_string_format
from utils import date_utcnow
@ -240,7 +241,7 @@ def _add_global_block_reason(base_dir: str,
return False
blocking_reasons_filename = \
base_dir + '/accounts/blocking_reasons.txt'
data_dir(base_dir) + '/blocking_reasons.txt'
if not block_nickname.startswith('#'):
# is the handle already blocked?
@ -302,7 +303,7 @@ def add_global_block(base_dir: str,
block_nickname, block_domain,
reason)
blocking_filename = base_dir + '/accounts/blocking.txt'
blocking_filename = data_dir(base_dir) + '/blocking.txt'
if not block_nickname.startswith('#'):
# is the handle already blocked?
block_handle = block_nickname + '@' + block_domain
@ -481,7 +482,7 @@ def _remove_global_block_reason(base_dir: str,
unblock_domain: str) -> bool:
"""Remove a globla block reason
"""
unblocking_filename = base_dir + '/accounts/blocking_reasons.txt'
unblocking_filename = data_dir(base_dir) + '/blocking_reasons.txt'
if not os.path.isfile(unblocking_filename):
return False
@ -524,7 +525,7 @@ def remove_global_block(base_dir: str,
unblock_nickname,
unblock_domain)
unblocking_filename = base_dir + '/accounts/blocking.txt'
unblocking_filename = data_dir(base_dir) + '/blocking.txt'
if not unblock_nickname.startswith('#'):
unblock_handle = unblock_nickname + '@' + unblock_domain
if os.path.isfile(unblocking_filename):
@ -621,7 +622,7 @@ def is_blocked_hashtag(base_dir: str, hashtag: str) -> bool:
# avoid very long hashtags
if len(hashtag) > 32:
return True
global_blocking_filename = base_dir + '/accounts/blocking.txt'
global_blocking_filename = data_dir(base_dir) + '/blocking.txt'
if os.path.isfile(global_blocking_filename):
hashtag = hashtag.strip('\n').strip('\r')
if not hashtag.startswith('#'):
@ -641,7 +642,7 @@ def get_domain_blocklist(base_dir: str) -> str:
for evil in evil_domains:
blocked_str += evil + '\n'
global_blocking_filename = base_dir + '/accounts/blocking.txt'
global_blocking_filename = data_dir(base_dir) + '/blocking.txt'
if not os.path.isfile(global_blocking_filename):
return blocked_str
try:
@ -666,7 +667,7 @@ def update_blocked_cache(base_dir: str,
seconds_since_last_update = curr_time - blocked_cache_last_updated
if seconds_since_last_update < blocked_cache_update_secs:
return blocked_cache_last_updated
global_blocking_filename = base_dir + '/accounts/blocking.txt'
global_blocking_filename = data_dir(base_dir) + '/blocking.txt'
if not os.path.isfile(global_blocking_filename):
return blocked_cache_last_updated
try:
@ -724,7 +725,7 @@ def is_blocked_domain(base_dir: str, domain: str,
return True
else:
# instance block list
global_blocking_filename = base_dir + '/accounts/blocking.txt'
global_blocking_filename = data_dir(base_dir) + '/blocking.txt'
if os.path.isfile(global_blocking_filename):
search_str += '\n'
search_str_short = None
@ -743,7 +744,7 @@ def is_blocked_domain(base_dir: str, domain: str,
print('EX: unable to read ' + global_blocking_filename +
' ' + str(ex))
else:
allow_filename = base_dir + '/accounts/allowedinstances.txt'
allow_filename = data_dir(base_dir) + '/allowedinstances.txt'
# instance allow list
if not short_domain:
if not text_in_file(domain, allow_filename):
@ -766,7 +767,7 @@ def is_blocked_nickname(base_dir: str, nickname: str,
return True
else:
# instance-wide block list
global_blocking_filename = base_dir + '/accounts/blocking.txt'
global_blocking_filename = data_dir(base_dir) + '/blocking.txt'
if os.path.isfile(global_blocking_filename):
search_str += '\n'
try:
@ -818,7 +819,7 @@ def is_blocked(base_dir: str, nickname: str, domain: str,
if blocked_str == block_handle:
return True
else:
global_blocks_filename = base_dir + '/accounts/blocking.txt'
global_blocks_filename = data_dir(base_dir) + '/blocking.txt'
if os.path.isfile(global_blocks_filename):
if block_nickname:
if text_in_file(block_nickname + '@*\n',
@ -832,7 +833,7 @@ def is_blocked(base_dir: str, nickname: str, domain: str,
return True
if not block_federated:
federated_blocks_filename = \
base_dir + '/accounts/block_api.txt'
data_dir(base_dir) + '/block_api.txt'
if os.path.isfile(federated_blocks_filename):
block_federated = []
try:
@ -849,7 +850,7 @@ def is_blocked(base_dir: str, nickname: str, domain: str,
return True
else:
# instance allow list
allow_filename = base_dir + '/accounts/allowedinstances.txt'
allow_filename = data_dir(base_dir) + '/allowedinstances.txt'
short_domain = _get_short_domain(block_domain)
if not short_domain and block_domain:
if not text_in_file(block_domain + '\n', allow_filename):
@ -904,7 +905,7 @@ def allowed_announce(base_dir: str, nickname: str, domain: str,
# non-cached instance level announce blocks
global_announce_blocks_filename = \
base_dir + '/accounts/noannounce.txt'
data_dir(base_dir) + '/noannounce.txt'
if os.path.isfile(global_announce_blocks_filename):
if block_nickname:
if text_in_file(block_nickname + '@*',
@ -1563,7 +1564,7 @@ def outbox_undo_mute(base_dir: str, http_prefix: str,
def broch_mode_is_active(base_dir: str) -> bool:
"""Returns true if broch mode is active
"""
allow_filename = base_dir + '/accounts/allowedinstances.txt'
allow_filename = data_dir(base_dir) + '/allowedinstances.txt'
return os.path.isfile(allow_filename)
@ -1576,7 +1577,7 @@ def set_broch_mode(base_dir: str, domain_full: str, enabled: bool) -> None:
to construct an instance level allow list. Anything arriving
which is then not from one of the allowed domains will be dropped
"""
allow_filename = base_dir + '/accounts/allowedinstances.txt'
allow_filename = data_dir(base_dir) + '/allowedinstances.txt'
if not enabled:
# remove instance allow list
@ -1595,11 +1596,12 @@ def set_broch_mode(base_dir: str, domain_full: str, enabled: bool) -> None:
# generate instance allow list
allowed_domains = [domain_full]
follow_files = ('following.txt', 'followers.txt')
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for acct in dirs:
if not is_account_dir(acct):
continue
account_dir = os.path.join(base_dir + '/accounts', acct)
account_dir = os.path.join(dir_str, acct)
for follow_file_type in follow_files:
following_filename = account_dir + '/' + follow_file_type
if not os.path.isfile(following_filename):
@ -1639,7 +1641,7 @@ def broch_mode_lapses(base_dir: str, lapse_days: int) -> bool:
"""After broch mode is enabled it automatically
lapses after a period of time
"""
allow_filename = base_dir + '/accounts/allowedinstances.txt'
allow_filename = data_dir(base_dir) + '/allowedinstances.txt'
if not os.path.isfile(allow_filename):
return False
last_modified = file_last_modified(allow_filename)
@ -1858,7 +1860,7 @@ def get_blocks_via_server(session, nickname: str, password: str,
def load_blocked_military(base_dir: str) -> {}:
"""Loads a list of nicknames for accounts which block military instances
"""
block_military_filename = base_dir + '/accounts/block_military.txt'
block_military_filename = data_dir(base_dir) + '/block_military.txt'
nicknames_list = []
if os.path.isfile(block_military_filename):
try:
@ -1883,7 +1885,7 @@ def save_blocked_military(base_dir: str, block_military: {}) -> None:
for nickname, _ in block_military.items():
nicknames_str += nickname + '\n'
block_military_filename = base_dir + '/accounts/block_military.txt'
block_military_filename = data_dir(base_dir) + '/block_military.txt'
try:
with open(block_military_filename, 'w+',
encoding='utf-8') as fp_mil:
@ -1921,7 +1923,7 @@ def load_federated_blocks_endpoints(base_dir: str) -> []:
"""
block_federated_endpoints = []
block_api_endpoints_filename = \
base_dir + '/accounts/block_api_endpoints.txt'
data_dir(base_dir) + '/block_api_endpoints.txt'
if os.path.isfile(block_api_endpoints_filename):
new_block_federated_endpoints = []
try:
@ -2033,7 +2035,7 @@ def _update_federated_blocks(session, base_dir: str,
block_federated.append(handle)
block_api_filename = \
base_dir + '/accounts/block_api.txt'
data_dir(base_dir) + '/block_api.txt'
if not new_block_api_str:
print('DEBUG: federated blocklist not loaded: ' + block_api_filename)
if os.path.isfile(block_api_filename):
@ -2057,7 +2059,7 @@ def save_block_federated_endpoints(base_dir: str,
"""Saves a list of blocking API endpoints
"""
block_api_endpoints_filename = \
base_dir + '/accounts/block_api_endpoints.txt'
data_dir(base_dir) + '/block_api_endpoints.txt'
result = []
block_federated_endpoints_str = ''
for endpoint in block_federated_endpoints:
@ -2079,7 +2081,7 @@ def save_block_federated_endpoints(base_dir: str,
except OSError:
print('EX: unable to delete block_api_endpoints.txt')
block_api_filename = \
base_dir + '/accounts/block_api.txt'
data_dir(base_dir) + '/block_api.txt'
if os.path.isfile(block_api_filename):
try:
os.remove(block_api_filename)

blog.py

@ -16,6 +16,7 @@ from webapp_utils import html_footer
from webapp_utils import get_post_attachments_as_html
from webapp_utils import edit_text_area
from webapp_media import add_embedded_elements
from utils import data_dir
from utils import remove_link_tracking
from utils import get_url_from_post
from utils import date_from_string_format
@ -704,11 +705,12 @@ def _no_of_blog_accounts(base_dir: str) -> int:
"""Returns the number of blog accounts
"""
ctr = 0
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for acct in dirs:
if not is_account_dir(acct):
continue
account_dir = os.path.join(base_dir + '/accounts', acct)
account_dir = os.path.join(dir_str, acct)
blogs_index = account_dir + '/tlblogs.index'
if os.path.isfile(blogs_index):
ctr += 1
@ -719,11 +721,12 @@ def _no_of_blog_accounts(base_dir: str) -> int:
def _single_blog_account_nickname(base_dir: str) -> str:
"""Returns the nickname of a single blog account
"""
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for acct in dirs:
if not is_account_dir(acct):
continue
account_dir = os.path.join(base_dir + '/accounts', acct)
account_dir = os.path.join(dir_str, acct)
blogs_index = account_dir + '/tlblogs.index'
if os.path.isfile(blogs_index):
return acct.split('@')[0]
@ -760,11 +763,12 @@ def html_blog_view(authorized: bool,
domain_full = get_full_domain(domain, port)
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for acct in dirs:
if not is_account_dir(acct):
continue
account_dir = os.path.join(base_dir + '/accounts', acct)
account_dir = os.path.join(dir_str, acct)
blogs_index = account_dir + '/tlblogs.index'
if os.path.isfile(blogs_index):
blog_str += '<p class="blogaccount">'
@ -796,13 +800,13 @@ def html_edit_blog(media_instance: bool, translate: {},
edit_blog_text = \
'<h1">' + translate['Write your post text below.'] + '</h1>'
if os.path.isfile(base_dir + '/accounts/newpost.txt'):
dir_str = data_dir(base_dir)
if os.path.isfile(dir_str + '/newpost.txt'):
try:
with open(base_dir + '/accounts/newpost.txt', 'r',
encoding='utf-8') as file:
with open(dir_str + '/newpost.txt', 'r', encoding='utf-8') as file:
edit_blog_text = '<p>' + file.read() + '</p>'
except OSError:
print('EX: unable to read ' + base_dir + '/accounts/newpost.txt')
print('EX: unable to read ' + dir_str + '/newpost.txt')
css_filename = base_dir + '/epicyon-profile.css'
if os.path.isfile(base_dir + '/epicyon.css'):


@ -9,6 +9,7 @@ __module_group__ = "RSS Feeds"
import os
import datetime
from utils import data_dir
from utils import date_utcnow
from utils import date_epoch
@ -181,7 +182,7 @@ def get_hashtag_categories(base_dir: str,
def update_hashtag_categories(base_dir: str) -> None:
"""Regenerates the list of hashtag categories
"""
category_list_filename = base_dir + '/accounts/categoryList.txt'
category_list_filename = data_dir(base_dir) + '/categoryList.txt'
hashtag_categories = get_hashtag_categories(base_dir, False, None)
if not hashtag_categories:
if os.path.isfile(category_list_filename):


@ -15,6 +15,7 @@ import email.parser
import urllib.parse
from shutil import copyfile
from dateutil.parser import parse
from utils import data_dir
from utils import remove_link_tracking
from utils import string_contains
from utils import string_ends_with
@ -383,7 +384,7 @@ def _update_common_emoji(base_dir: str, emoji_content: str) -> None:
emoji_content = _get_emoji_name_from_code(base_dir, emoji_code)
if not emoji_content:
return
common_emoji_filename = base_dir + '/accounts/common_emoji.txt'
common_emoji_filename = data_dir(base_dir) + '/common_emoji.txt'
common_emoji = None
if os.path.isfile(common_emoji_filename):
try:
@ -2297,7 +2298,8 @@ def load_auto_cw_cache(base_dir: str) -> {}:
for each account
"""
auto_cw_cache = {}
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for handle in dirs:
if not is_account_dir(handle):
continue


@ -9,6 +9,7 @@ __module_group__ = "Core"
import os
import time
from utils import data_dir
from utils import save_json
from utils import user_agent_domain
from utils import remove_eol
@ -49,15 +50,15 @@ def update_known_crawlers(ua_str: str,
for uagent in remove_crawlers:
del known_crawlers[uagent]
# save the list of crawlers
save_json(known_crawlers,
base_dir + '/accounts/knownCrawlers.json')
dir_str = data_dir(base_dir)
save_json(known_crawlers, dir_str + '/knownCrawlers.json')
return curr_time
def load_known_web_bots(base_dir: str) -> []:
"""Returns a list of known web bots
"""
known_bots_filename = base_dir + '/accounts/knownBots.txt'
known_bots_filename = data_dir(base_dir) + '/knownBots.txt'
if not os.path.isfile(known_bots_filename):
return []
crawlers_str = None
@ -85,7 +86,7 @@ def load_known_web_bots(base_dir: str) -> []:
def _save_known_web_bots(base_dir: str, known_bots: []) -> bool:
"""Saves a list of known web bots
"""
known_bots_filename = base_dir + '/accounts/knownBots.txt'
known_bots_filename = data_dir(base_dir) + '/knownBots.txt'
known_bots_str = ''
for crawler in known_bots:
known_bots_str += crawler.strip() + '\n'


@ -50,6 +50,7 @@ from shares import expire_shares
from categories import load_city_hashtags
from categories import update_hashtag_categories
from languages import load_default_post_languages
from utils import data_dir
from utils import string_contains
from utils import check_bad_path
from utils import acct_handle_dir
@ -161,7 +162,8 @@ class PubServer(BaseHTTPRequestHandler):
print(endpoint_type.upper() + ' no nickname ' + self.path)
http_400(self)
return
if not os.path.isdir(self.server.base_dir + '/accounts/' +
dir_str = data_dir(self.server.base_dir)
if not os.path.isdir(dir_str + '/' +
nickname + '@' + self.server.domain):
print(endpoint_type.upper() +
' for non-existent account ' + self.path)
@ -333,7 +335,7 @@ class PubServer(BaseHTTPRequestHandler):
if avatar_file.startswith('avatar'):
avatar_file = 'avatar.' + avatar_file_ext
media_filename = \
self.server.base_dir + '/accounts/' + \
data_dir(self.server.base_dir) + '/' + \
nickname + '@' + self.server.domain + '/' + \
avatar_file
else:
@ -348,7 +350,7 @@ class PubServer(BaseHTTPRequestHandler):
if banner_file.startswith('banner'):
banner_file = 'banner.' + banner_file_ext
media_filename = \
self.server.base_dir + '/accounts/' + \
data_dir(self.server.base_dir) + '/' + \
nickname + '@' + self.server.domain + '/' + \
banner_file
@ -654,7 +656,8 @@ def run_shares_expire_watchdog(project_version: str, httpd) -> None:
def load_tokens(base_dir: str, tokens_dict: {}, tokens_lookup: {}) -> None:
"""Loads shared items access tokens for each account
"""
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for handle in dirs:
if '@' in handle:
token_filename = acct_handle_dir(base_dir, handle) + '/.token'
@ -767,9 +770,10 @@ def run_daemon(no_of_books: int,
server_address = ('', proxy_port)
pub_handler = partial(PubServer)
if not os.path.isdir(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
if not os.path.isdir(dir_str):
print('Creating accounts directory')
os.mkdir(base_dir + '/accounts')
os.mkdir(dir_str)
httpd = None
try:
@ -850,7 +854,7 @@ def run_daemon(no_of_books: int,
httpd.public_replies_unlisted = public_replies_unlisted
# load a list of dogwhistle words
dogwhistles_filename = base_dir + '/accounts/dogwhistles.txt'
dogwhistles_filename = data_dir(base_dir) + '/dogwhistles.txt'
if not os.path.isfile(dogwhistles_filename):
dogwhistles_filename = base_dir + '/default_dogwhistles.txt'
httpd.dogwhistles = load_dogwhistles(dogwhistles_filename)
@ -886,7 +890,7 @@ def run_daemon(no_of_books: int,
httpd.dm_license_url = ''
# fitness metrics
fitness_filename = base_dir + '/accounts/fitness.json'
fitness_filename = data_dir(base_dir) + '/fitness.json'
httpd.fitness = {}
if os.path.isfile(fitness_filename):
fitness = load_json(fitness_filename)
@ -1186,11 +1190,12 @@ def run_daemon(no_of_books: int,
# whether to enable broch mode, which locks down the instance
set_broch_mode(base_dir, httpd.domain_full, broch_mode)
if not os.path.isdir(base_dir + '/accounts/inbox@' + domain):
dir_str = data_dir(base_dir)
if not os.path.isdir(dir_str + '/inbox@' + domain):
print('Creating shared inbox: inbox@' + domain)
create_shared_inbox(base_dir, 'inbox', domain, port, http_prefix)
if not os.path.isdir(base_dir + '/accounts/news@' + domain):
if not os.path.isdir(dir_str + '/news@' + domain):
print('Creating news inbox: news@' + domain)
create_news_inbox(base_dir, domain, port, http_prefix)
set_config_param(base_dir, "listsEnabled", "Murdoch press")
@ -1198,7 +1203,7 @@ def run_daemon(no_of_books: int,
# dict of known web crawlers accessing nodeinfo or the masto API
# and how many times they have been seen
httpd.known_crawlers = {}
known_crawlers_filename = base_dir + '/accounts/knownCrawlers.json'
known_crawlers_filename = dir_str + '/knownCrawlers.json'
if os.path.isfile(known_crawlers_filename):
httpd.known_crawlers = load_json(known_crawlers_filename)
# when was the last crawler seen?


@ -85,6 +85,7 @@ from httpcodes import http_304
from httpcodes import http_400
from httpcodes import http_503
from httpcodes import write2
from utils import data_dir
from utils import user_agent_domain
from utils import local_network_host
from utils import permitted_dir
@ -2409,8 +2410,7 @@ def daemon_http_get(self) -> None:
if (is_image_file(self.path) and
(self.path.startswith('/login.') or
self.path.startswith('/qrcode.png'))):
icon_filename = \
self.server.base_dir + '/accounts' + self.path
icon_filename = data_dir(self.server.base_dir) + self.path
if os.path.isfile(icon_filename):
if etag_exists(self, icon_filename):
# The file has not changed


@ -18,6 +18,7 @@ from httpcodes import write2
from httpcodes import http_304
from httpcodes import http_404
from httpheaders import set_headers_etag
from utils import data_dir
from utils import get_nickname_from_actor
from utils import media_file_mime_type
from utils import get_image_mime_type
@ -689,7 +690,7 @@ def show_background_image(self, path: str,
# follow screen background image
if path.endswith('/' + bg_im + '-background.' + ext):
bg_filename = \
base_dir + '/accounts/' + \
data_dir(base_dir) + '/' + \
bg_im + '-background.' + ext
if os.path.isfile(bg_filename):
if etag_exists(self, bg_filename):


@ -16,6 +16,7 @@ from httpheaders import set_headers
from newswire import get_rss_from_dict
from fitnessFunctions import fitness_performance
from posts import is_moderator
from utils import data_dir
from utils import local_actor_url
from utils import save_json
from webapp_column_right import html_edit_news_post
@ -89,7 +90,7 @@ def newswire_vote(self, calling_domain: str, path: str,
newswire_item[votes_index].append('vote:' + nickname)
filename = newswire_item[filename_index]
newswire_state_filename = \
base_dir + '/accounts/.newswirestate.json'
data_dir(base_dir) + '/.newswirestate.json'
try:
save_json(newswire, newswire_state_filename)
except BaseException as ex:
@ -144,7 +145,7 @@ def newswire_unvote(self, calling_domain: str, path: str,
newswire_item[votes_index].remove('vote:' + nickname)
filename = newswire_item[filename_index]
newswire_state_filename = \
base_dir + '/accounts/.newswirestate.json'
data_dir(base_dir) + '/.newswirestate.json'
try:
save_json(newswire, newswire_state_filename)
except BaseException as ex:


@ -8,6 +8,7 @@ __status__ = "Production"
__module_group__ = "Core GET"
import os
from utils import data_dir
from utils import is_account_dir
from utils import acct_dir
from session import establish_session
@ -97,7 +98,8 @@ def get_rss2site(self, calling_domain: str, path: str,
return
msg = ''
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for acct in dirs:
if not is_account_dir(acct):
continue


@ -10,6 +10,7 @@ __module_group__ = "Core POST"
import os
import errno
from socket import error as SocketError
from utils import data_dir
from utils import dangerous_markup
from utils import get_instance_url
from utils import get_nickname_from_actor
@ -194,10 +195,10 @@ def links_update(self, calling_domain: str, cookie: str,
self.server.postreq_busy = False
return
links_filename = base_dir + '/accounts/links.txt'
about_filename = base_dir + '/accounts/about.md'
tos_filename = base_dir + '/accounts/tos.md'
specification_filename = base_dir + '/accounts/activitypub.md'
links_filename = data_dir(base_dir) + '/links.txt'
about_filename = data_dir(base_dir) + '/about.md'
tos_filename = data_dir(base_dir) + '/tos.md'
specification_filename = data_dir(base_dir) + '/activitypub.md'
if not boundary:
if b'--LYNX' in post_bytes:


@ -25,6 +25,7 @@ from httpheaders import redirect_headers
from httpheaders import clear_login_details
from webapp_login import html_get_login_credentials
from webapp_suspended import html_suspended
from utils import data_dir
from utils import acct_dir
from utils import is_suspended
from utils import is_local_network_address
@ -191,8 +192,7 @@ def post_login_screen(self, calling_domain: str, cookie: str,
self.server.tokens[login_nickname] = token
login_handle = login_nickname + '@' + domain
token_filename = \
base_dir + '/accounts/' + \
login_handle + '/.token'
data_dir(base_dir) + '/' + login_handle + '/.token'
try:
with open(token_filename, 'w+',
encoding='utf-8') as fp_tok:
@ -201,9 +201,9 @@ def post_login_screen(self, calling_domain: str, cookie: str,
print('EX: Unable to save token for ' +
login_nickname + ' ' + str(ex))
dir_str = data_dir(base_dir)
person_upgrade_actor(base_dir, None,
base_dir + '/accounts/' +
login_handle + '.json')
dir_str + '/' + login_handle + '.json')
index = self.server.tokens[login_nickname]
self.server.tokens_lookup[index] = login_nickname


@ -11,6 +11,7 @@ import os
import errno
import urllib.parse
from socket import error as SocketError
from utils import data_dir
from utils import delete_post
from utils import locate_post
from utils import get_full_domain
@ -146,8 +147,8 @@ def moderator_actions(self, path: str, calling_domain: str, cookie: str,
# is this a local nickname on this instance?
local_handle = \
search_handle + '@' + domain
if os.path.isdir(base_dir +
'/accounts/' + local_handle):
dir_str = data_dir(base_dir)
if os.path.isdir(dir_str + '/' + local_handle):
search_handle = local_handle
else:
search_handle = ''


@ -10,6 +10,7 @@ __module_group__ = "Core POST"
import os
import errno
from socket import error as SocketError
from utils import data_dir
from utils import clear_from_post_caches
from utils import remove_id_ending
from utils import save_json
@ -95,7 +96,7 @@ def newswire_update(self, calling_domain: str, cookie: str,
self.server.postreq_busy = False
return
newswire_filename = base_dir + '/accounts/newswire.txt'
newswire_filename = data_dir(base_dir) + '/newswire.txt'
if not boundary:
if b'--LYNX' in post_bytes:
@ -140,8 +141,7 @@ def newswire_update(self, calling_domain: str, cookie: str,
# save filtered words list for the newswire
filter_newswire_filename = \
base_dir + '/accounts/' + \
'news@' + domain + '/filters.txt'
data_dir(base_dir) + '/' + 'news@' + domain + '/filters.txt'
if fields.get('filteredWordsNewswire'):
try:
with open(filter_newswire_filename, 'w+',
@ -158,7 +158,7 @@ def newswire_update(self, calling_domain: str, cookie: str,
filter_newswire_filename)
# save dogwhistle words list
dogwhistles_filename = base_dir + '/accounts/dogwhistles.txt'
dogwhistles_filename = data_dir(base_dir) + '/dogwhistles.txt'
if fields.get('dogwhistleWords'):
try:
with open(dogwhistles_filename, 'w+',
@ -179,8 +179,7 @@ def newswire_update(self, calling_domain: str, cookie: str,
self.server.dogwhistles = {}
# save news tagging rules
hashtag_rules_filename = \
base_dir + '/accounts/hashtagrules.txt'
hashtag_rules_filename = data_dir(base_dir) + '/hashtagrules.txt'
if fields.get('hashtagRulesList'):
try:
with open(hashtag_rules_filename, 'w+',
@ -196,8 +195,7 @@ def newswire_update(self, calling_domain: str, cookie: str,
print('EX: _newswire_update unable to delete ' +
hashtag_rules_filename)
newswire_tusted_filename = \
base_dir + '/accounts/newswiretrusted.txt'
newswire_tusted_filename = data_dir(base_dir) + '/newswiretrusted.txt'
if fields.get('trustedNewswire'):
newswire_trusted = fields['trustedNewswire']
if not newswire_trusted.endswith('\n'):
@ -448,7 +446,7 @@ def news_post_edit(self, calling_domain: str, cookie: str,
first_paragraph_from_string(news_post_content)
# save newswire
newswire_state_filename = \
base_dir + '/accounts/.newswirestate.json'
data_dir(base_dir) + '/.newswirestate.json'
try:
save_json(newswire, newswire_state_filename)
except BaseException as ex:


@ -14,6 +14,7 @@ from socket import error as SocketError
from blocking import save_blocked_military
from httpheaders import redirect_headers
from httpheaders import clear_login_details
from utils import data_dir
from utils import set_premium_account
from utils import is_premium_account
from utils import remove_avatar_from_cache
@ -241,7 +242,7 @@ def _profile_post_peertube_instances(base_dir: str, fields: {}, self,
peertube_instances: []) -> None:
""" HTTP POST save peertube instances list
"""
peertube_instances_file = base_dir + '/accounts/peertube.txt'
peertube_instances_file = data_dir(base_dir) + '/peertube.txt'
if fields.get('ptInstances'):
peertube_instances.clear()
try:
@ -309,7 +310,7 @@ def _profile_post_buy_domains(base_dir: str, fields: {}, self) -> None:
buy_sites[buy_icon_text] = site_url.strip()
if str(self.server.buy_sites) != str(buy_sites):
self.server.buy_sites = buy_sites
buy_sites_filename = base_dir + '/accounts/buy_sites.json'
buy_sites_filename = data_dir(base_dir) + '/buy_sites.json'
if buy_sites:
save_json(buy_sites, buy_sites_filename)
else:
@ -2538,7 +2539,7 @@ def profile_edit(self, calling_domain: str, cookie: str,
# time is an image with metadata publicly exposed,
# even for a few mS
if m_type == 'instanceLogo':
filename_base = base_dir + '/accounts/login.temp'
filename_base = data_dir(base_dir) + '/login.temp'
elif m_type == 'importTheme':
if not os.path.isdir(base_dir + '/imports'):
os.mkdir(base_dir + '/imports')


@ -72,6 +72,7 @@ from tests import test_update_actor
from tests import run_all_tests
from auth import store_basic_credentials
from auth import create_password
from utils import data_dir
from utils import string_ends_with
from utils import remove_html
from utils import remove_eol
@ -3090,7 +3091,7 @@ def _command_options() -> None:
if not os.path.isdir(account_dir):
print('Account ' + nickname + '@' + domain + ' not found')
sys.exit()
password_file = base_dir + '/accounts/passwords'
password_file = data_dir(base_dir) + '/passwords'
if os.path.isfile(password_file):
if text_in_file(nickname + ':', password_file):
store_basic_credentials(base_dir, nickname, new_password)
@ -3496,8 +3497,9 @@ def _command_options() -> None:
if os.path.isdir(base_dir + '/tags'):
shutil.rmtree(base_dir + '/tags',
ignore_errors=False, onerror=None)
if os.path.isdir(base_dir + '/accounts'):
shutil.rmtree(base_dir + '/accounts',
dir_str = data_dir(base_dir)
if os.path.isdir(dir_str):
shutil.rmtree(dir_str,
ignore_errors=False, onerror=None)
if os.path.isdir(base_dir + '/keys'):
shutil.rmtree(base_dir + '/keys',


@ -8,6 +8,7 @@ __status__ = "Production"
__module_group__ = "Moderation"
import os
from utils import data_dir
from utils import acct_dir
from utils import text_in_file
from utils import remove_eol
@ -42,7 +43,7 @@ def add_global_filter(base_dir: str, words: str) -> bool:
return False
if len(words) < 2:
return False
filters_filename = base_dir + '/accounts/filters.txt'
filters_filename = data_dir(base_dir) + '/filters.txt'
if os.path.isfile(filters_filename):
if text_in_file(words, filters_filename):
return False
@ -85,7 +86,7 @@ def remove_filter(base_dir: str, nickname: str, domain: str,
def remove_global_filter(base_dir: str, words: str) -> bool:
"""Removes a global word filter
"""
filters_filename = base_dir + '/accounts/filters.txt'
filters_filename = data_dir(base_dir) + '/filters.txt'
if not os.path.isfile(filters_filename):
return False
if not text_in_file(words, filters_filename):
@ -161,7 +162,7 @@ def is_filtered_globally(base_dir: str, content: str,
system_language: str) -> bool:
"""Is the given content globally filtered?
"""
global_filters_filename = base_dir + '/accounts/filters.txt'
global_filters_filename = data_dir(base_dir) + '/filters.txt'
if _is_filtered_base(global_filters_filename, content,
system_language):
return True


@ -11,6 +11,7 @@ import os
import time
from webapp_utils import html_header_with_external_style
from webapp_utils import html_footer
from utils import data_dir
from utils import get_config_param
from utils import save_json
@ -125,7 +126,7 @@ def html_watch_points_graph(base_dir: str, fitness: {}, fitness_id: str,
def fitness_thread(base_dir: str, fitness: {}) -> None:
"""Thread used to save fitness function scores
"""
fitness_filename = base_dir + '/accounts/fitness.json'
fitness_filename = data_dir(base_dir) + '/fitness.json'
while True:
# every 10 mins
time.sleep(60 * 10)


@ -34,6 +34,7 @@ from utils import local_actor_url
from utils import text_in_file
from utils import remove_eol
from utils import get_actor_from_post
from utils import data_dir
from acceptreject import create_accept
from acceptreject import create_reject
from webfinger import webfinger_handle
@ -49,11 +50,12 @@ def create_initial_last_seen(base_dir: str, http_prefix: str) -> None:
The lastseen files are used to generate the Zzz icons on
follows/following lists on the profile screen.
"""
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for acct in dirs:
if not is_account_dir(acct):
continue
account_dir = os.path.join(base_dir + '/accounts', acct)
account_dir = os.path.join(dir_str, acct)
following_filename = account_dir + '/following.txt'
if not os.path.isfile(following_filename):
continue
@ -318,8 +320,9 @@ def unfollow_account(base_dir: str, nickname: str, domain: str,
handle_to_unfollow = follow_nickname + '@' + follow_domain
if group_account:
handle_to_unfollow = '!' + handle_to_unfollow
if not os.path.isdir(base_dir + '/accounts'):
os.mkdir(base_dir + '/accounts')
dir_str = data_dir(base_dir)
if not os.path.isdir(dir_str):
os.mkdir(dir_str)
handle_dir = acct_handle_dir(base_dir, handle)
if not os.path.isdir(handle_dir):
os.mkdir(handle_dir)
@ -390,8 +393,9 @@ def clear_follows(base_dir: str, nickname: str, domain: str,
follow_file: str) -> None:
"""Removes all follows
"""
if not os.path.isdir(base_dir + '/accounts'):
os.mkdir(base_dir + '/accounts')
dir_str = data_dir(base_dir)
if not os.path.isdir(dir_str):
os.mkdir(dir_str)
accounts_dir = acct_dir(base_dir, nickname, domain)
if not os.path.isdir(accounts_dir):
os.mkdir(accounts_dir)
@ -602,7 +606,7 @@ def follow_approval_required(base_dir: str, nickname_to_follow: str,
manually_approve_follows = False
domain_to_follow = remove_domain_port(domain_to_follow)
actor_filename = base_dir + '/accounts/' + \
actor_filename = data_dir(base_dir) + '/' + \
nickname_to_follow + '@' + domain_to_follow + '.json'
if os.path.isfile(actor_filename):
actor = load_json(actor_filename)
@ -1428,7 +1432,8 @@ def get_followers_of_actor(base_dir: str, actor: str, debug: bool) -> {}:
if debug:
print('DEBUG: searching for handle ' + actor_handle)
# for each of the accounts
for subdir, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for subdir, dirs, _ in os.walk(dir_str):
for account in dirs:
if '@' not in account:
continue


@ -10,6 +10,12 @@ __module_group__ = "Calendar"
import os
def _data_dir2(base_dir) -> str:
"""Returns the directory where account data is stored
"""
return base_dir + '/accounts'
def _text_in_file2(text: str, filename: str,
case_sensitive: bool) -> bool:
"""is the given text in the given file?
@ -32,7 +38,7 @@ def _text_in_file2(text: str, filename: str,
def _dir_acct(base_dir: str, nickname: str, domain: str) -> str:
"""Returns the directory of an account
"""
return base_dir + '/accounts/' + nickname + '@' + domain
return _data_dir2(base_dir) + '/' + nickname + '@' + domain
def _port_domain_remove(domain: str) -> str:
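Note that this module keeps a private _data_dir2() copy rather than importing data_dir from utils, presumably to stay free of local imports. A usage sketch of the resulting path builder; the base directory, nickname and domain below are hypothetical values for illustration:

# hypothetical values, for illustration only
print(_dir_acct('/var/www/epicyon', 'alice', 'example.com'))
# prints: /var/www/epicyon/accounts/alice@example.com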


@ -10,6 +10,7 @@ __module_group__ = "Core"
import os
import time
import random
from utils import data_dir
from utils import get_full_domain
from utils import is_account_dir
from utils import get_nickname_from_actor
@ -184,12 +185,13 @@ def _update_import_following(base_dir: str,
def run_import_following(base_dir: str, httpd):
"""Sends out follow requests for imported following csv files
"""
dir_str = data_dir(base_dir)
while True:
time.sleep(20)
# get a list of accounts on the instance, in random sequence
accounts_list = []
for _, dirs, _ in os.walk(base_dir + '/accounts'):
for _, dirs, _ in os.walk(dir_str):
for account in dirs:
if '@' not in account:
continue
@ -203,7 +205,7 @@ def run_import_following(base_dir: str, httpd):
# check if each accounts has an import csv
random.shuffle(accounts_list)
for account in accounts_list:
account_dir = base_dir + '/accounts/' + account
account_dir = dir_str + '/' + account
import_filename = account_dir + '/import_following.csv'
if not os.path.isfile(import_filename):


@ -87,6 +87,7 @@ from utils import valid_hash_tag
from utils import get_attributed_to
from utils import get_reply_to
from utils import get_actor_from_post
from utils import data_dir
from categories import get_hashtag_categories
from categories import set_hashtag_category
from httpsig import get_digest_algorithm_from_headers
@ -193,7 +194,7 @@ def cache_svg_images(session, base_dir: str, http_prefix: str,
actor = 'unknown'
if post_attachments:
actor = get_attributed_to(obj['attributedTo'])
log_filename = base_dir + '/accounts/svg_scripts_log.txt'
log_filename = data_dir(base_dir) + '/svg_scripts_log.txt'
for index in range(len(post_attachments)):
attach = post_attachments[index]
if not attach.get('mediaType'):
@ -855,7 +856,7 @@ def save_post_to_inbox_queue(base_dir: str, http_prefix: str,
inbox_queue_dir = create_inbox_queue_dir(nickname, domain, base_dir)
handle = nickname + '@' + domain
destination = base_dir + '/accounts/' + \
destination = data_dir(base_dir) + '/' + \
handle + '/inbox/' + post_id.replace('/', '#') + '.json'
filename = inbox_queue_dir + '/' + post_id.replace('/', '#') + '.json'
@ -930,7 +931,7 @@ def _inbox_post_recipients_add(base_dir: str, to_list: [],
recipients_dict[handle] = None
else:
if debug:
print('DEBUG: ' + base_dir + '/accounts/' +
print('DEBUG: ' + data_dir(base_dir) + '/' +
handle + ' does not exist')
else:
if debug:
@ -1190,11 +1191,12 @@ def _notify_moved(base_dir: str, domain_full: str,
http_prefix: str) -> None:
"""Notify that an actor has moved
"""
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for account in dirs:
if not is_account_dir(account):
continue
account_dir = base_dir + '/accounts/' + account
account_dir = dir_str + '/' + account
following_filename = account_dir + '/following.txt'
if not os.path.isfile(following_filename):
continue
@ -1321,7 +1323,7 @@ def _person_receive_update(base_dir: str,
new_actor = prev_nickname + '@' + prev_domain_full + ' ' + \
new_nickname + '@' + new_domain_full
refollow_str = ''
refollow_filename = base_dir + '/accounts/actors_moved.txt'
refollow_filename = data_dir(base_dir) + '/actors_moved.txt'
refollow_file_exists = False
if os.path.isfile(refollow_filename):
try:
@ -5534,9 +5536,10 @@ def clear_queue_items(base_dir: str, queue: []) -> None:
"""
ctr = 0
queue.clear()
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for account in dirs:
queue_dir = base_dir + '/accounts/' + account + '/queue'
queue_dir = dir_str + '/' + account + '/queue'
if not os.path.isdir(queue_dir):
continue
for _, _, queuefiles in os.walk(queue_dir):
@ -5557,9 +5560,10 @@ def _restore_queue_items(base_dir: str, queue: []) -> None:
"""Checks the queue for each account and appends filenames
"""
queue.clear()
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for account in dirs:
queue_dir = base_dir + '/accounts/' + account + '/queue'
queue_dir = dir_str + '/' + account + '/queue'
if not os.path.isdir(queue_dir):
continue
for _, _, queuefiles in os.walk(queue_dir):
@ -5715,7 +5719,7 @@ def _check_json_signature(base_dir: str, queue_json: {}) -> (bool, bool):
has_json_signature = True
else:
unknown_contexts_file = \
base_dir + '/accounts/unknownContexts.txt'
data_dir(base_dir) + '/unknownContexts.txt'
unknown_context = str(original_json['@context'])
print('unrecognized @context: ' + unknown_context)
@ -5736,7 +5740,7 @@ def _check_json_signature(base_dir: str, queue_json: {}) -> (bool, bool):
print('Unrecognized jsonld signature type: ' + jwebsig_type)
unknown_signatures_file = \
base_dir + '/accounts/unknownJsonSignatures.txt'
data_dir(base_dir) + '/unknownJsonSignatures.txt'
already_unknown = False
if os.path.isfile(unknown_signatures_file):
@ -6446,7 +6450,7 @@ def run_inbox_queue(server,
debug)
inbox_start_time = time.time()
dogwhistles_filename = base_dir + '/accounts/dogwhistles.txt'
dogwhistles_filename = data_dir(base_dir) + '/dogwhistles.txt'
if not os.path.isfile(dogwhistles_filename):
dogwhistles_filename = base_dir + '/default_dogwhistles.txt'
dogwhistles = load_dogwhistles(dogwhistles_filename)


@ -10,6 +10,7 @@ __module_group__ = "Core"
import json
import os
from urllib import request, parse
from utils import data_dir
from utils import is_account_dir
from utils import acct_dir
from utils import get_actor_languages_list
@ -364,7 +365,8 @@ def load_default_post_languages(base_dir: str) -> {}:
for new posts for each account
"""
result = {}
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for handle in dirs:
if not is_account_dir(handle):
continue


@ -18,6 +18,7 @@ from utils import get_attachment_property_value
from utils import no_of_accounts
from utils import get_status_count
from utils import lines_in_file
from utils import data_dir
def _meta_data_instance_v1(show_accounts: bool,
@ -31,7 +32,7 @@ def _meta_data_instance_v1(show_accounts: bool,
""" /api/v1/instance endpoint
"""
admin_actor_filename = \
base_dir + '/accounts/' + admin_nickname + '@' + domain + '.json'
data_dir(base_dir) + '/' + admin_nickname + '@' + domain + '.json'
if not os.path.isfile(admin_actor_filename):
return {}
@ -41,8 +42,7 @@ def _meta_data_instance_v1(show_accounts: bool,
return {}
rules_list = []
rules_filename = \
base_dir + '/accounts/tos.md'
rules_filename = data_dir(base_dir) + '/tos.md'
if os.path.isfile(rules_filename):
with open(rules_filename, 'r', encoding='utf-8') as fp_rules:
rules_lines = fp_rules.readlines()


@ -20,6 +20,7 @@ from utils import get_video_extensions
from utils import get_audio_extensions
from utils import get_image_mime_type
from utils import lines_in_file
from utils import data_dir
def _get_masto_api_v2id_from_nickname(nickname: str) -> int:
@ -37,7 +38,7 @@ def _meta_data_instance_v2(show_accounts: bool,
version: str, translate: {}) -> {}:
""" /api/v2/instance endpoint
"""
account_dir = base_dir + '/accounts/' + admin_nickname + '@' + domain
account_dir = data_dir(base_dir) + '/' + admin_nickname + '@' + domain
admin_actor_filename = account_dir + '.json'
if not os.path.isfile(admin_actor_filename):
return {}
@ -48,8 +49,7 @@ def _meta_data_instance_v2(show_accounts: bool,
return {}
rules_list = []
rules_filename = \
base_dir + '/accounts/tos.md'
rules_filename = data_dir(base_dir) + '/tos.md'
if os.path.isfile(rules_filename):
with open(rules_filename, 'r', encoding='utf-8') as fp_rules:
rules_lines = fp_rules.readlines()


@ -8,6 +8,7 @@ __status__ = "Production"
__module_group__ = "Core"
import os
from utils import data_dir
from utils import is_account_dir
from utils import get_nickname_from_actor
from utils import get_domain_from_actor
@ -219,7 +220,8 @@ def migrate_accounts(base_dir: str, session,
"""
# update followers and following lists for each account
ctr = 0
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for handle in dirs:
if not is_account_dir(handle):
continue


@ -36,6 +36,7 @@ from utils import clear_from_post_caches
from utils import dangerous_markup
from utils import local_actor_url
from utils import text_in_file
from utils import data_dir
from inbox import store_hash_tags
from session import create_session
from threads import begin_thread
@ -45,7 +46,7 @@ def _update_feeds_outbox_index(base_dir: str, domain: str,
post_id: str) -> None:
"""Updates the index used for imported RSS feeds
"""
base_path = base_dir + '/accounts/news@' + domain
base_path = data_dir(base_dir) + '/news@' + domain
index_filename = base_path + '/outbox.index'
if os.path.isfile(index_filename):
@ -387,7 +388,7 @@ def _newswire_hashtag_processing(base_dir: str, post_json_object: {},
Returns true if the post should be saved to the news timeline
of this instance
"""
rules_filename = base_dir + '/accounts/hashtagrules.txt'
rules_filename = data_dir(base_dir) + '/hashtagrules.txt'
if not os.path.isfile(rules_filename):
return True
rules = []
@ -447,7 +448,7 @@ def _create_news_mirror(base_dir: str, domain: str,
if '|' in url or '>' in url:
return True
mirror_dir = base_dir + '/accounts/newsmirror'
mirror_dir = data_dir(base_dir) + '/newsmirror'
if not os.path.isdir(mirror_dir):
os.mkdir(mirror_dir)
@ -457,7 +458,7 @@ def _create_news_mirror(base_dir: str, domain: str,
no_of_dirs = len(dirs)
break
mirror_index_filename = base_dir + '/accounts/newsmirror.txt'
mirror_index_filename = data_dir(base_dir) + '/newsmirror.txt'
if max_mirrored_articles > 0 and no_of_dirs > max_mirrored_articles:
if not os.path.isfile(mirror_index_filename):
@ -558,7 +559,7 @@ def _convert_rss_to_activitypub(base_dir: str, http_prefix: str,
print('No newswire to convert')
return
base_path = base_dir + '/accounts/news@' + domain + '/outbox'
base_path = data_dir(base_dir) + '/news@' + domain + '/outbox'
if not os.path.isdir(base_path):
os.mkdir(base_path)
@ -787,8 +788,8 @@ def run_newswire_daemon(base_dir: str, httpd,
translate: {}) -> None:
"""Periodically updates RSS feeds
"""
newswire_state_filename = base_dir + '/accounts/.newswirestate.json'
refresh_filename = base_dir + '/accounts/.refresh_newswire'
newswire_state_filename = data_dir(base_dir) + '/.newswirestate.json'
refresh_filename = data_dir(base_dir) + '/.refresh_newswire'
print('Starting newswire daemon')
# initial sleep to allow the system to start up


@ -19,6 +19,7 @@ from datetime import timezone
from collections import OrderedDict
from utils import valid_post_date
from categories import set_hashtag_category
from utils import data_dir
from utils import string_contains
from utils import image_mime_types_dict
from utils import resembles_url
@ -1623,7 +1624,8 @@ def _add_blogs_to_newswire(base_dir: str, domain: str, newswire: {},
moderation_dict = {}
# go through each account
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for handle in dirs:
if not is_account_dir(handle):
continue
@ -1639,7 +1641,7 @@ def _add_blogs_to_newswire(base_dir: str, domain: str, newswire: {},
continue
# is there a blogs timeline for this account?
account_dir = os.path.join(base_dir + '/accounts', handle)
account_dir = os.path.join(dir_str, handle)
blogs_index = account_dir + '/tlblogs.index'
if os.path.isfile(blogs_index):
domain = handle.split('@')[1]
@ -1655,7 +1657,7 @@ def _add_blogs_to_newswire(base_dir: str, domain: str, newswire: {},
OrderedDict(sorted(moderation_dict.items(), reverse=True))
# save the moderation queue details for later display
newswire_moderation_filename = \
base_dir + '/accounts/newswiremoderation.txt'
data_dir(base_dir) + '/newswiremoderation.txt'
if sorted_moderation_dict:
save_json(sorted_moderation_dict, newswire_moderation_filename)
else:
@ -1678,7 +1680,7 @@ def get_dict_from_newswire(session, base_dir: str, domain: str,
timeout_sec: int) -> {}:
"""Gets rss feeds as a dictionary from newswire file
"""
subscriptions_filename = base_dir + '/accounts/newswire.txt'
subscriptions_filename = data_dir(base_dir) + '/newswire.txt'
if not os.path.isfile(subscriptions_filename):
return {}


@ -15,6 +15,7 @@ from posts import outbox_message_create_wrap
from posts import save_post_to_box
from posts import send_to_followers_thread
from posts import send_to_named_addresses_thread
from utils import data_dir
from utils import quote_toots_allowed
from utils import get_post_attachments
from utils import get_attributed_to
@ -435,7 +436,7 @@ def post_message_to_outbox(session, translate: {},
break
media_dir = \
base_dir + '/accounts/' + \
data_dir(base_dir) + '/' + \
post_to_nickname + '@' + domain
upload_media_filename = media_dir + '/upload.' + file_extension
if not os.path.isfile(upload_media_filename):
@ -537,7 +538,7 @@ def post_message_to_outbox(session, translate: {},
if is_featured_writer(base_dir, post_to_nickname, domain):
saved_post_id = saved_filename.split('/')[-1]
blogs_dir = \
base_dir + '/accounts/news@' + domain + '/tlblogs'
data_dir(base_dir) + '/news@' + domain + '/tlblogs'
if not os.path.isdir(blogs_dir):
os.mkdir(blogs_dir)
copyfile(saved_filename, blogs_dir + '/' + saved_post_id)
@ -547,7 +548,7 @@ def post_message_to_outbox(session, translate: {},
# clear the citations file if it exists
citations_filename = \
base_dir + '/accounts/' + \
data_dir(base_dir) + '/' + \
post_to_nickname + '@' + domain + '/.citations.txt'
if os.path.isfile(citations_filename):
try:


@ -74,6 +74,7 @@ from utils import dangerous_svg
from utils import text_in_file
from utils import contains_statuses
from utils import get_actor_from_post
from utils import data_dir
from session import get_json_valid
from session import create_session
from session import get_json
@ -564,21 +565,21 @@ def _create_person_base(base_dir: str, nickname: str, domain: str, port: int,
if save_to_file:
# save person to file
people_subdir = '/accounts'
if not os.path.isdir(base_dir + people_subdir):
os.mkdir(base_dir + people_subdir)
if not os.path.isdir(base_dir + people_subdir + '/' + handle):
os.mkdir(base_dir + people_subdir + '/' + handle)
if not os.path.isdir(base_dir + people_subdir + '/' +
people_subdir = data_dir(base_dir)
if not os.path.isdir(people_subdir):
os.mkdir(people_subdir)
if not os.path.isdir(people_subdir + '/' + handle):
os.mkdir(people_subdir + '/' + handle)
if not os.path.isdir(people_subdir + '/' +
handle + '/inbox'):
os.mkdir(base_dir + people_subdir + '/' + handle + '/inbox')
if not os.path.isdir(base_dir + people_subdir + '/' +
os.mkdir(people_subdir + '/' + handle + '/inbox')
if not os.path.isdir(people_subdir + '/' +
handle + '/outbox'):
os.mkdir(base_dir + people_subdir + '/' + handle + '/outbox')
if not os.path.isdir(base_dir + people_subdir + '/' +
os.mkdir(people_subdir + '/' + handle + '/outbox')
if not os.path.isdir(people_subdir + '/' +
handle + '/queue'):
os.mkdir(base_dir + people_subdir + '/' + handle + '/queue')
filename = base_dir + people_subdir + '/' + handle + '.json'
os.mkdir(people_subdir + '/' + handle + '/queue')
filename = people_subdir + '/' + handle + '.json'
save_json(new_person, filename)
# save to cache
@ -662,7 +663,8 @@ def create_group(base_dir: str, nickname: str, domain: str, port: int,
def clear_person_qrcodes(base_dir: str) -> None:
"""Clears qrcodes for all accounts
"""
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for handle in dirs:
if '@' not in handle:
continue
@ -720,7 +722,8 @@ def create_person(base_dir: str, nickname: str, domain: str, port: int,
if registrations_remaining <= 0:
return None, None, None, None
else:
if os.path.isdir(base_dir + '/accounts/news@' + domain):
dir_str = data_dir(base_dir)
if os.path.isdir(dir_str + '/news@' + domain):
# news account already exists
return None, None, None, None
@ -742,8 +745,9 @@ def create_person(base_dir: str, nickname: str, domain: str, port: int,
set_role(base_dir, nickname, domain, 'moderator')
set_role(base_dir, nickname, domain, 'editor')
if not os.path.isdir(base_dir + '/accounts'):
os.mkdir(base_dir + '/accounts')
dir_str = data_dir(base_dir)
if not os.path.isdir(dir_str):
os.mkdir(dir_str)
account_dir = acct_dir(base_dir, nickname, domain)
if not os.path.isdir(account_dir):
os.mkdir(account_dir)
@ -1002,14 +1006,14 @@ def person_upgrade_actor(base_dir: str, person_json: {},
# also update the actor within the cache
actor_cache_filename = \
base_dir + '/accounts/cache/actors/' + \
data_dir(base_dir) + '/cache/actors/' + \
person_json['id'].replace('/', '#') + '.json'
if os.path.isfile(actor_cache_filename):
save_json(person_json, actor_cache_filename)
# update domain/@nickname in actors cache
actor_cache_filename = \
base_dir + '/accounts/cache/actors/' + \
data_dir(base_dir) + '/cache/actors/' + \
replace_users_with_at(person_json['id']).replace('/', '#') + \
'.json'
if os.path.isfile(actor_cache_filename):
@ -1244,7 +1248,7 @@ def set_bio(base_dir: str, nickname: str, domain: str, bio: str) -> bool:
def reenable_account(base_dir: str, nickname: str) -> None:
"""Removes an account suspension
"""
suspended_filename = base_dir + '/accounts/suspended.txt'
suspended_filename = data_dir(base_dir) + '/suspended.txt'
if os.path.isfile(suspended_filename):
lines = []
with open(suspended_filename, 'r', encoding='utf-8') as fp_sus:
@ -1270,7 +1274,7 @@ def suspend_account(base_dir: str, nickname: str, domain: str) -> None:
return
# Don't suspend moderators
moderators_file = base_dir + '/accounts/moderators.txt'
moderators_file = data_dir(base_dir) + '/moderators.txt'
if os.path.isfile(moderators_file):
with open(moderators_file, 'r', encoding='utf-8') as fp_mod:
lines = fp_mod.readlines()
@ -1291,7 +1295,7 @@ def suspend_account(base_dir: str, nickname: str, domain: str) -> None:
except OSError:
print('EX: suspend_account unable to delete ' + token_filename)
suspended_filename = base_dir + '/accounts/suspended.txt'
suspended_filename = data_dir(base_dir) + '/suspended.txt'
if os.path.isfile(suspended_filename):
with open(suspended_filename, 'r', encoding='utf-8') as fp_sus:
lines = fp_sus.readlines()
@ -1328,7 +1332,7 @@ def can_remove_post(base_dir: str,
return False
# is the post by a moderator?
moderators_file = base_dir + '/accounts/moderators.txt'
moderators_file = data_dir(base_dir) + '/moderators.txt'
if os.path.isfile(moderators_file):
with open(moderators_file, 'r', encoding='utf-8') as fp_mod:
lines = fp_mod.readlines()
@ -1386,7 +1390,7 @@ def remove_account(base_dir: str, nickname: str,
return False
# Don't remove moderators
moderators_file = base_dir + '/accounts/moderators.txt'
moderators_file = data_dir(base_dir) + '/moderators.txt'
if os.path.isfile(moderators_file):
with open(moderators_file, 'r', encoding='utf-8') as fp_mod:
lines = fp_mod.readlines()
@ -2192,11 +2196,12 @@ def update_memorial_flags(base_dir: str, person_cache: {}) -> None:
"""
memorials = get_memorials(base_dir).split('\n')
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for account in dirs:
if not is_account_dir(account):
continue
actor_filename = base_dir + '/accounts/' + account + '.json'
actor_filename = data_dir(base_dir) + '/' + account + '.json'
if not os.path.isfile(actor_filename):
continue
actor_json = load_json(actor_filename)


@ -92,6 +92,7 @@ from utils import acct_dir
from utils import local_actor_url
from utils import get_reply_to
from utils import get_actor_from_post
from utils import data_dir
from media import get_music_metadata
from media import attach_media
from media import replace_you_tube
@ -151,7 +152,7 @@ def convert_post_content_to_html(message_json: {}) -> None:
def is_moderator(base_dir: str, nickname: str) -> bool:
"""Returns true if the given nickname is a moderator
"""
moderators_file = base_dir + '/accounts/moderators.txt'
moderators_file = data_dir(base_dir) + '/moderators.txt'
if not os.path.isfile(moderators_file):
admin_name = get_config_param(base_dir, 'admin')
@ -1592,7 +1593,7 @@ def _create_post_mod_report(base_dir: str,
else:
new_post['moderationStatus'] = 'pending'
# save to index file
moderation_index_file = base_dir + '/accounts/moderation.txt'
moderation_index_file = data_dir(base_dir) + '/moderation.txt'
try:
with open(moderation_index_file, 'a+', encoding='utf-8') as mod_file:
mod_file.write(new_post_id + '\n')
@ -2626,7 +2627,7 @@ def create_report_post(base_dir: str,
# create the list of moderators from the moderators file
moderators_list = []
moderators_file = base_dir + '/accounts/moderators.txt'
moderators_file = data_dir(base_dir) + '/moderators.txt'
if os.path.isfile(moderators_file):
with open(moderators_file, 'r', encoding='utf-8') as fp_mod:
for line in fp_mod:
@ -4348,7 +4349,7 @@ def create_moderation(base_dir: str, nickname: str, domain: str, port: int,
}
if is_moderator(base_dir, nickname):
moderation_index_file = base_dir + '/accounts/moderation.txt'
moderation_index_file = data_dir(base_dir) + '/moderation.txt'
if os.path.isfile(moderation_index_file):
with open(moderation_index_file, 'r',
encoding='utf-8') as index_file:
@ -5001,7 +5002,8 @@ def archive_posts(base_dir: str, http_prefix: str, archive_dir: str,
if not os.path.isdir(archive_dir + '/accounts'):
os.mkdir(archive_dir + '/accounts')
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for handle in dirs:
if '@' in handle:
nickname = handle.split('@')[0]
@ -5134,7 +5136,8 @@ def expire_posts(base_dir: str, http_prefix: str,
"""Expires posts for instance accounts
"""
expired_post_count = 0
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for handle in dirs:
if '@' not in handle:
continue
@ -5590,7 +5593,7 @@ def get_public_post_domains_blocked(session, base_dir: str,
if not post_domains:
return []
blocking_filename = base_dir + '/accounts/blocking.txt'
blocking_filename = data_dir(base_dir) + '/blocking.txt'
if not os.path.isfile(blocking_filename):
return []
@ -5644,7 +5647,7 @@ def check_domains(session, base_dir: str,
if not non_mutuals:
print('No non-mutual followers were found')
return
follower_warning_filename = base_dir + '/accounts/followerWarnings.txt'
follower_warning_filename = data_dir(base_dir) + '/followerWarnings.txt'
update_follower_warnings = False
follower_warning_str = ''
if os.path.isfile(follower_warning_filename):
@ -5749,8 +5752,7 @@ def populate_replies_json(base_dir: str, nickname: str, domain: str,
if not reply_found:
message_id2 = remove_eol(message_id)
search_filename = \
base_dir + \
'/accounts/inbox@' + \
data_dir(base_dir) + '/inbox@' + \
domain + '/inbox/' + \
message_id2.replace('/', '#') + '.json'
if os.path.isfile(search_filename):
@ -6533,7 +6535,7 @@ def post_is_muted(base_dir: str, nickname: str, domain: str,
is_muted = True
else:
mute_filename = \
base_dir + '/accounts/cache/announce/' + nickname + \
data_dir(base_dir) + '/cache/announce/' + nickname + \
'/' + message_id.replace('/', '#') + '.json.muted'
if os.path.isfile(mute_filename):
is_muted = True


@ -8,6 +8,7 @@ __status__ = "Production"
__module_group__ = "Core"
import pyqrcode
from utils import data_dir
def save_domain_qrcode(base_dir: str, http_prefix: str,
@ -15,6 +16,6 @@ def save_domain_qrcode(base_dir: str, http_prefix: str,
"""Saves a qrcode image for the domain name
This helps to transfer onion or i2p domains to a mobile device
"""
qrcode_filename = base_dir + '/accounts/qrcode.png'
qrcode_filename = data_dir(base_dir) + '/qrcode.png'
url = pyqrcode.create(http_prefix + '://' + domain_full)
url.png(qrcode_filename, scale)
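A usage sketch of the updated function; the parameters cut off by the hunk are assumed to be the full domain and a pixel scale, as the body suggests, and both values below are made up:

    # hypothetical call: writes qrcode.png into the accounts data directory
    base_dir = '/var/www/epicyon'  # illustrative
    save_domain_qrcode(base_dir, 'http', 'epicyonabcdefghijklmnop.onion', 6)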


@ -11,6 +11,7 @@ import os
import re
import urllib.parse
from pprint import pprint
from utils import data_dir
from utils import has_object_string
from utils import has_object_string_object
from utils import has_object_string_type
@ -463,7 +464,7 @@ def outbox_undo_reaction(recent_posts_cache: {},
def _update_common_reactions(base_dir: str, emoji_content: str) -> None:
"""Updates the list of commonly used reactions
"""
common_reactions_filename = base_dir + '/accounts/common_reactions.txt'
common_reactions_filename = data_dir(base_dir) + '/common_reactions.txt'
common_reactions = None
if os.path.isfile(common_reactions_filename):
try:


@ -10,6 +10,7 @@ __module_group__ = "Core"
import os
from collections import OrderedDict
from utils import data_dir
from utils import get_post_attachments
from utils import get_content_from_post
from utils import has_object_dict
@ -259,7 +260,7 @@ def remove_reading_event(base_dir: str,
if not book_event_type:
print('remove_reading_event no book event')
return False
reading_path = base_dir + '/accounts/reading'
reading_path = data_dir(base_dir) + '/reading'
readers_path = reading_path + '/readers'
reader_books_filename = \
readers_path + '/' + actor.replace('/', '#') + '.json'
@ -391,7 +392,7 @@ def _update_recent_books_list(base_dir: str, book_id: str,
debug: bool) -> None:
"""prepend a book to the recent books list
"""
recent_books_filename = base_dir + '/accounts/recent_books.txt'
recent_books_filename = data_dir(base_dir) + '/recent_books.txt'
if os.path.isfile(recent_books_filename):
try:
with open(recent_books_filename, 'r+',
@ -419,7 +420,7 @@ def _deduplicate_recent_books_list(base_dir: str,
max_recent_books: int) -> None:
""" Deduplicate and limit the length of the recent books list
"""
recent_books_filename = base_dir + '/accounts/recent_books.txt'
recent_books_filename = data_dir(base_dir) + '/recent_books.txt'
if not os.path.isfile(recent_books_filename):
return
@ -485,9 +486,10 @@ def store_book_events(base_dir: str,
if debug:
print('DEBUG: no book event')
return False
reading_path = base_dir + '/accounts/reading'
if not os.path.isdir(base_dir + '/accounts'):
os.mkdir(base_dir + '/accounts')
dir_str = data_dir(base_dir)
reading_path = dir_str + '/reading'
if not os.path.isdir(dir_str):
os.mkdir(dir_str)
if not os.path.isdir(reading_path):
os.mkdir(reading_path)
books_path = reading_path + '/books'
@ -558,7 +560,7 @@ def html_profile_book_list(base_dir: str, actor: str, no_of_books: int,
authorized: bool) -> str:
"""Returns html for displaying a list of books on a profile screen
"""
reading_path = base_dir + '/accounts/reading'
reading_path = data_dir(base_dir) + '/reading'
readers_path = reading_path + '/readers'
reader_books_filename = \
readers_path + '/' + actor.replace('/', '#') + '.json'


@ -8,6 +8,7 @@ __status__ = "Production"
__module_group__ = "Core"
import os
from utils import data_dir
from utils import get_user_paths
from utils import is_dormant
from utils import acct_dir
@ -26,7 +27,7 @@ def get_moved_accounts(base_dir: str, nickname: str, domain: str,
filename: str) -> {}:
"""returns a dict of moved accounts
"""
moved_accounts_filename = base_dir + '/accounts/actors_moved.txt'
moved_accounts_filename = data_dir(base_dir) + '/actors_moved.txt'
if not os.path.isfile(moved_accounts_filename):
return {}
refollow_str = ''
@ -230,12 +231,12 @@ def update_moved_actors(base_dir: str, debug: bool) -> None:
# get the handles to be checked for movedTo attribute
handles_to_check = []
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for account in dirs:
if not is_account_dir(account):
continue
following_filename = \
base_dir + '/accounts/' + account + '/following.txt'
following_filename = dir_str + '/' + account + '/following.txt'
if not os.path.isfile(following_filename):
continue
following_str = ''
@ -288,7 +289,7 @@ def update_moved_actors(base_dir: str, debug: bool) -> None:
else:
print('No moved accounts detected')
moved_accounts_filename = base_dir + '/accounts/actors_moved.txt'
moved_accounts_filename = data_dir(base_dir) + '/actors_moved.txt'
if not moved_str:
if os.path.isfile(moved_accounts_filename):
try:


@ -8,6 +8,7 @@ __status__ = "Production"
__module_group__ = "Profile Metadata"
import os
from utils import data_dir
from utils import load_json
from utils import save_json
from utils import get_status_number
@ -22,14 +23,15 @@ def _clear_role_status(base_dir: str, role: str) -> None:
This could be slow if there are many users, but only happens
rarely when roles are appointed or removed
"""
directory = os.fsencode(base_dir + '/accounts/')
dir_str = data_dir(base_dir)
directory = os.fsencode(dir_str + '/')
for fname in os.scandir(directory):
filename = os.fsdecode(fname.name)
if '@' not in filename:
continue
if not filename.endswith(".json"):
continue
filename = os.path.join(base_dir + '/accounts/', filename)
filename = os.path.join(dir_str + '/', filename)
if not text_in_file('"' + role + '"', filename):
continue
actor_json = load_json(filename)
@ -48,7 +50,7 @@ def _add_role(base_dir: str, nickname: str, domain: str,
This is a file containing the nicknames of accounts having this role
"""
domain = remove_domain_port(domain)
role_file = base_dir + '/accounts/' + role_filename
role_file = data_dir(base_dir) + '/' + role_filename
if os.path.isfile(role_file):
# is this nickname already in the file?
@ -71,7 +73,8 @@ def _add_role(base_dir: str, nickname: str, domain: str,
role_nickname = role_nickname.strip('\n').strip('\r')
if len(role_nickname) < 2:
continue
if os.path.isdir(base_dir + '/accounts/' +
dir_str = data_dir(base_dir)
if os.path.isdir(dir_str + '/' +
role_nickname + '@' + domain):
fp_role.write(role_nickname + '\n')
except OSError:
@ -90,7 +93,7 @@ def _remove_role(base_dir: str, nickname: str, role_filename: str) -> None:
"""Removes a role nickname from the file.
This is a file containing the nicknames of accounts having this role
"""
role_file = base_dir + '/accounts/' + role_filename
role_file = data_dir(base_dir) + '/' + role_filename
if not os.path.isfile(role_file):
return
@ -270,7 +273,7 @@ def actor_has_role(actor_json: {}, role_name: str) -> bool:
def is_devops(base_dir: str, nickname: str) -> bool:
"""Returns true if the given nickname has the devops role
"""
devops_file = base_dir + '/accounts/devops.txt'
devops_file = data_dir(base_dir) + '/devops.txt'
if not os.path.isfile(devops_file):
admin_name = get_config_param(base_dir, 'admin')
@ -305,7 +308,7 @@ def set_roles_from_list(base_dir: str, domain: str, admin_nickname: str,
# check for admin user
if not path.startswith('/users/' + admin_nickname + '/'):
return
roles_filename = base_dir + '/accounts/' + list_filename
roles_filename = data_dir(base_dir) + '/' + list_filename
if not fields.get(list_name):
if os.path.isfile(roles_filename):
_clear_role_status(base_dir, role_name)
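The role files referenced above are plain lists of nicknames, one per line; a minimal membership-check sketch, where the helper name is illustrative and not part of the changeset:

    def nickname_has_role(base_dir: str, nickname: str,
                          role_filename: str) -> bool:
        # illustrative: true if the nickname appears in e.g. moderators.txt
        role_file = data_dir(base_dir) + '/' + role_filename
        if not os.path.isfile(role_file):
            return False
        with open(role_file, 'r', encoding='utf-8') as fp_role:
            lines = fp_role.read().splitlines()
        return nickname in [line.strip() for line in lines]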


@ -9,6 +9,7 @@ __module_group__ = "Calendar"
import os
import time
from utils import data_dir
from utils import date_from_string_format
from utils import date_epoch
from utils import acct_handle_dir
@ -196,7 +197,8 @@ def run_post_schedule(base_dir: str, httpd, max_scheduled_posts: int):
while True:
time.sleep(60)
# for each account
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for account in dirs:
if '@' not in account:
continue
@ -204,7 +206,7 @@ def run_post_schedule(base_dir: str, httpd, max_scheduled_posts: int):
continue
# scheduled posts index for this account
schedule_index_filename = \
base_dir + '/accounts/' + account + '/schedule.index'
dir_str + '/' + account + '/schedule.index'
if not os.path.isfile(schedule_index_filename):
continue
_update_post_schedule(base_dir, account,


@ -23,6 +23,7 @@ from session import post_json
from session import post_image
from session import create_session
from session import get_json_valid
from utils import data_dir
from utils import resembles_url
from utils import date_utcnow
from utils import dangerous_markup
@ -282,7 +283,8 @@ def _indicate_new_share_available(base_dir: str, http_prefix: str,
block_federated: []) -> None:
"""Indicate to each account that a new share is available
"""
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for handle in dirs:
if not is_account_dir(handle):
continue
@ -417,7 +419,8 @@ def expire_shares(base_dir: str, max_shares_on_profile: int,
person_cache: {}) -> None:
"""Removes expired items from shares
"""
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for account in dirs:
if not is_account_dir(account):
continue
@ -1401,7 +1404,8 @@ def shares_catalog_endpoint(base_dir: str, http_prefix: str,
curr_date = date_utcnow()
curr_date_str = curr_date.strftime("%Y-%m-%d")
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for acct in dirs:
if not is_account_dir(acct):
continue
@ -1505,7 +1509,7 @@ def generate_shared_item_federation_tokens(shared_items_federated_domains: [],
tokens_json = {}
if base_dir:
tokens_filename = \
base_dir + '/accounts/sharedItemsFederationTokens.json'
data_dir(base_dir) + '/sharedItemsFederationTokens.json'
if os.path.isfile(tokens_filename):
tokens_json = load_json(tokens_filename, 1, 2)
if tokens_json is None:
@ -1536,7 +1540,7 @@ def update_shared_item_federation_token(base_dir: str,
tokens_json = {}
if base_dir:
tokens_filename = \
base_dir + '/accounts/sharedItemsFederationTokens.json'
data_dir(base_dir) + '/sharedItemsFederationTokens.json'
if os.path.isfile(tokens_filename):
if debug:
print('Update loading tokens for ' + token_domain_full)
@ -1581,7 +1585,7 @@ def merge_shared_item_tokens(base_dir: str, domain_full: str,
changed = True
if base_dir and changed:
tokens_filename = \
base_dir + '/accounts/sharedItemsFederationTokens.json'
data_dir(base_dir) + '/sharedItemsFederationTokens.json'
save_json(tokens_json, tokens_filename)
return tokens_json
@ -1596,7 +1600,7 @@ def create_shared_item_federation_token(base_dir: str,
tokens_json = {}
if base_dir:
tokens_filename = \
base_dir + '/accounts/sharedItemsFederationTokens.json'
data_dir(base_dir) + '/sharedItemsFederationTokens.json'
if os.path.isfile(tokens_filename):
tokens_json = load_json(tokens_filename, 1, 2)
if tokens_json is None:
@ -1642,7 +1646,7 @@ def authorize_shared_items(shared_items_federated_domains: [],
return False
if not tokens_json:
tokens_filename = \
base_dir + '/accounts/sharedItemsFederationTokens.json'
data_dir(base_dir) + '/sharedItemsFederationTokens.json'
if not os.path.isfile(tokens_filename):
if debug:
print('DEBUG: shared item federation tokens file missing ' +
@ -1758,7 +1762,7 @@ def _generate_next_shares_token_update(base_dir: str,
"""Creates a file containing the next date when the shared items token
for this instance will be updated
"""
token_update_dir = base_dir + '/accounts'
token_update_dir = data_dir(base_dir)
if not os.path.isdir(base_dir):
os.mkdir(base_dir)
if not os.path.isdir(token_update_dir):
@ -1810,7 +1814,7 @@ def _regenerate_shares_token(base_dir: str, domain_full: str,
federated shares list of domains continue to follow and communicate
then they will receive the new token automatically
"""
token_update_filename = base_dir + '/accounts/.tokenUpdate'
token_update_filename = data_dir(base_dir) + '/.tokenUpdate'
if not os.path.isfile(token_update_filename):
return
next_update_sec = None
@ -1870,7 +1874,7 @@ def run_federated_shares_daemon(base_dir: str, httpd, http_prefix: str,
# load the tokens
tokens_filename = \
base_dir + '/accounts/sharedItemsFederationTokens.json'
data_dir(base_dir) + '/sharedItemsFederationTokens.json'
if not os.path.isfile(tokens_filename):
time.sleep(file_check_interval_sec)
continue
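Every call site above rebuilds the same tokens path; a small helper sketch that would centralise it, with a name that is illustrative rather than part of the changeset:

    def shared_items_tokens_filename(base_dir: str) -> str:
        # illustrative: single definition of the path built repeatedly above
        return data_dir(base_dir) + '/sharedItemsFederationTokens.json'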


@ -9,8 +9,9 @@ __status__ = "Production"
__module_group__ = "Core"
import http.client
from urllib.parse import urlparse
import ssl
from urllib.parse import urlparse
from utils import data_dir
class Result:
@ -157,7 +158,7 @@ def referer_is_active(http_prefix: str,
def save_unavailable_sites(base_dir: str, sites_unavailable: []) -> None:
"""Save a list of unavailable sites
"""
unavailable_sites_filename = base_dir + '/accounts/unavailable_sites.txt'
unavailable_sites_filename = data_dir(base_dir) + '/unavailable_sites.txt'
sites_unavailable.sort()
try:
with open(unavailable_sites_filename, 'w+',
@ -172,7 +173,7 @@ def save_unavailable_sites(base_dir: str, sites_unavailable: []) -> None:
def load_unavailable_sites(base_dir: str) -> []:
"""load a list of unavailable sites
"""
unavailable_sites_filename = base_dir + '/accounts/unavailable_sites.txt'
unavailable_sites_filename = data_dir(base_dir) + '/unavailable_sites.txt'
sites_unavailable = []
try:
with open(unavailable_sites_filename, 'r',


@ -11,6 +11,7 @@ import os
import html
import random
import urllib.parse
from utils import data_dir
from utils import get_post_attachments
from utils import get_cached_post_filename
from utils import remove_id_ending
@ -77,7 +78,7 @@ def _speaker_pronounce(base_dir: str, say_text: str, translate: {}) -> str:
line items such as:
Epicyon -> Epi-cyon
"""
pronounce_filename = base_dir + '/accounts/speaker_pronounce.txt'
pronounce_filename = data_dir(base_dir) + '/speaker_pronounce.txt'
convert_dict = {}
if translate:
convert_dict = {


@ -8,6 +8,7 @@ __status__ = "Production"
__module_group__ = "Web Interface"
import os
from utils import data_dir
from utils import string_ends_with
from utils import is_account_dir
from utils import load_json
@ -153,6 +154,7 @@ def _copy_theme_help_files(base_dir: str, theme_name: str,
theme_dir = base_dir + '/theme/' + theme_name + '/welcome'
if not os.path.isdir(theme_dir):
theme_dir = base_dir + '/defaultwelcome'
dir_str = data_dir(base_dir)
for _, _, files in os.walk(theme_dir):
for help_markdown_file in files:
if not help_markdown_file.endswith('_' + system_language + '.md'):
@ -162,9 +164,9 @@ def _copy_theme_help_files(base_dir: str, theme_name: str,
'.md')
if dest_help_markdown_file in ('profile.md', 'final.md'):
dest_help_markdown_file = 'welcome_' + dest_help_markdown_file
if os.path.isdir(base_dir + '/accounts'):
if os.path.isdir(dir_str):
copyfile(theme_dir + '/' + help_markdown_file,
base_dir + '/accounts/' + dest_help_markdown_file)
dir_str + '/' + dest_help_markdown_file)
break
@ -453,7 +455,7 @@ def enable_grayscale(base_dir: str) -> None:
except OSError as ex:
print('EX: enable_grayscale unable to read ' +
template_filename + ' ' + str(ex))
grayscale_filename = base_dir + '/accounts/.grayscale'
grayscale_filename = data_dir(base_dir) + '/.grayscale'
if not os.path.isfile(grayscale_filename):
try:
with open(grayscale_filename, 'w+', encoding='utf-8') as grayfile:
@ -483,7 +485,7 @@ def disable_grayscale(base_dir: str) -> None:
except OSError as ex:
print('EX: disable_grayscale unable to read ' +
template_filename + ' ' + str(ex))
grayscale_filename = base_dir + '/accounts/.grayscale'
grayscale_filename = data_dir(base_dir) + '/.grayscale'
if os.path.isfile(grayscale_filename):
try:
os.remove(grayscale_filename)
@ -563,7 +565,7 @@ def set_theme_from_designer(base_dir: str, theme_name: str, domain: str,
allow_local_network_access: bool,
system_language: str,
dyslexic_font: bool):
custom_theme_filename = base_dir + '/accounts/theme.json'
custom_theme_filename = data_dir(base_dir) + '/theme.json'
save_json(theme_params, custom_theme_filename)
set_theme(base_dir, theme_name, domain,
allow_local_network_access, system_language,
@ -573,7 +575,7 @@ def set_theme_from_designer(base_dir: str, theme_name: str, domain: str,
def reset_theme_designer_settings(base_dir: str) -> None:
"""Resets the theme designer settings
"""
custom_variables_file = base_dir + '/accounts/theme.json'
custom_variables_file = data_dir(base_dir) + '/theme.json'
if os.path.isfile(custom_variables_file):
try:
os.remove(custom_variables_file)
@ -593,7 +595,7 @@ def _read_variables_file(base_dir: str, theme_name: str,
return
# set custom theme parameters
custom_variables_file = base_dir + '/accounts/theme.json'
custom_variables_file = data_dir(base_dir) + '/theme.json'
if os.path.isfile(custom_variables_file):
custom_theme_params = load_json(custom_variables_file, 0)
if custom_theme_params:
@ -667,7 +669,7 @@ def _set_theme_fonts(base_dir: str, theme_name: str) -> None:
def get_text_mode_banner(base_dir: str) -> str:
"""Returns the banner used for shell browsers, like Lynx
"""
text_mode_banner_filename = base_dir + '/accounts/banner.txt'
text_mode_banner_filename = data_dir(base_dir) + '/banner.txt'
if os.path.isfile(text_mode_banner_filename):
with open(text_mode_banner_filename, 'r',
encoding='utf-8') as fp_text:
@ -680,7 +682,7 @@ def get_text_mode_banner(base_dir: str) -> str:
def get_text_mode_logo(base_dir: str) -> str:
"""Returns the login screen logo used for shell browsers, like Lynx
"""
text_mode_logo_filename = base_dir + '/accounts/logo.txt'
text_mode_logo_filename = data_dir(base_dir) + '/logo.txt'
if not os.path.isfile(text_mode_logo_filename):
text_mode_logo_filename = base_dir + '/img/logo.txt'
@ -696,40 +698,38 @@ def _set_text_mode_theme(base_dir: str, name: str) -> None:
# in browsers such as Lynx
text_mode_logo_filename = \
base_dir + '/theme/' + name + '/logo.txt'
dir_str = data_dir(base_dir)
if os.path.isfile(text_mode_logo_filename):
try:
copyfile(text_mode_logo_filename,
base_dir + '/accounts/logo.txt')
copyfile(text_mode_logo_filename, dir_str + '/logo.txt')
except OSError:
print('EX: _set_text_mode_theme unable to copy ' +
text_mode_logo_filename + ' ' +
base_dir + '/accounts/logo.txt')
dir_str + '/logo.txt')
else:
dir_str = data_dir(base_dir)
try:
copyfile(base_dir + '/img/logo.txt',
base_dir + '/accounts/logo.txt')
copyfile(base_dir + '/img/logo.txt', dir_str + '/logo.txt')
except OSError:
print('EX: _set_text_mode_theme unable to copy ' +
base_dir + '/img/logo.txt ' +
base_dir + '/accounts/logo.txt')
base_dir + '/img/logo.txt ' + dir_str + '/logo.txt')
# set the text mode banner which appears in browsers such as Lynx
text_mode_banner_filename = \
base_dir + '/theme/' + name + '/banner.txt'
if os.path.isfile(base_dir + '/accounts/banner.txt'):
if os.path.isfile(dir_str + '/banner.txt'):
try:
os.remove(base_dir + '/accounts/banner.txt')
os.remove(dir_str + '/banner.txt')
except OSError:
print('EX: _set_text_mode_theme unable to delete ' +
base_dir + '/accounts/banner.txt')
dir_str + '/banner.txt')
if os.path.isfile(text_mode_banner_filename):
try:
copyfile(text_mode_banner_filename,
base_dir + '/accounts/banner.txt')
copyfile(text_mode_banner_filename, dir_str + '/banner.txt')
except OSError:
print('EX: _set_text_mode_theme unable to copy ' +
text_mode_banner_filename + ' ' +
base_dir + '/accounts/banner.txt')
dir_str + '/banner.txt')
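The banner handling above follows a remove-stale-copy, then copy-if-present pattern; a condensed sketch with an illustrative helper name:

    def install_optional_text_file(src_filename: str,
                                   dest_filename: str) -> None:
        # illustrative: clear any stale copy, then install the theme file
        if os.path.isfile(dest_filename):
            os.remove(dest_filename)
        if os.path.isfile(src_filename):
            copyfile(src_filename, dest_filename)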
def _set_theme_images(base_dir: str, name: str) -> None:
@ -756,11 +756,12 @@ def _set_theme_images(base_dir: str, name: str) -> None:
'welcome')
extensions = get_image_extensions()
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for acct in dirs:
if not is_account_dir(acct):
continue
account_dir = os.path.join(base_dir + '/accounts', acct)
account_dir = os.path.join(dir_str, acct)
for background_type in background_names:
for ext in extensions:
@ -776,7 +777,7 @@ def _set_theme_images(base_dir: str, name: str) -> None:
if os.path.isfile(background_image_filename):
try:
copyfile(background_image_filename,
base_dir + '/accounts/' +
dir_str + '/' +
background_type + '-background.' + ext)
continue
except OSError:
@ -784,14 +785,14 @@ def _set_theme_images(base_dir: str, name: str) -> None:
background_image_filename)
# background image was not found
# so remove any existing file
if os.path.isfile(base_dir + '/accounts/' +
if os.path.isfile(dir_str + '/' +
background_type + '-background.' + ext):
try:
os.remove(base_dir + '/accounts/' +
os.remove(dir_str + '/' +
background_type + '-background.' + ext)
except OSError:
print('EX: _set_theme_images unable to delete ' +
base_dir + '/accounts/' +
dir_str + '/' +
background_type + '-background.' + ext)
if os.path.isfile(profile_image_filename) and \
@ -883,9 +884,10 @@ def _set_clear_cache_flag(base_dir: str) -> None:
"""Sets a flag which can be used by an external system
(eg. a script in a cron job) to clear the browser cache
"""
if not os.path.isdir(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
if not os.path.isdir(dir_str):
return
flag_filename = base_dir + '/accounts/.clear_cache'
flag_filename = dir_str + '/.clear_cache'
try:
with open(flag_filename, 'w+', encoding='utf-8') as fp_flag:
fp_flag.write('\n')
@ -944,13 +946,13 @@ def set_theme(base_dir: str, name: str, domain: str,
# set the news avatar
news_avatar_theme_filename = \
base_dir + '/theme/' + name + '/icons/avatar_news.png'
if os.path.isdir(base_dir + '/accounts/news@' + domain):
dir_str = data_dir(base_dir)
if os.path.isdir(dir_str + '/news@' + domain):
if os.path.isfile(news_avatar_theme_filename):
news_avatar_filename = \
base_dir + '/accounts/news@' + domain + '/avatar.png'
news_avatar_filename = dir_str + '/news@' + domain + '/avatar.png'
copyfile(news_avatar_theme_filename, news_avatar_filename)
grayscale_filename = base_dir + '/accounts/.grayscale'
grayscale_filename = dir_str + '/.grayscale'
if os.path.isfile(grayscale_filename):
enable_grayscale(base_dir)
else:

View File

@ -570,12 +570,22 @@ def get_base_content_from_post(post_json_object: {},
return this_post_json['content']
def data_dir(base_dir: str) -> str:
"""Returns the directory where account data is stored
"""
return base_dir + '/accounts'
def acct_dir(base_dir: str, nickname: str, domain: str) -> str:
return base_dir + '/accounts/' + nickname + '@' + domain
"""Returns the directory for an account on this instance
"""
return data_dir(base_dir) + '/' + nickname + '@' + domain
def acct_handle_dir(base_dir: str, handle: str) -> str:
return base_dir + '/accounts/' + handle
"""Returns the directory for an account on this instance
"""
return data_dir(base_dir) + '/' + handle
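For reference, a minimal sketch of how these three helpers compose; the base directory, nickname and domain below are illustrative values:

    # illustrative values only
    base_dir = '/var/www/epicyon'
    assert data_dir(base_dir) == '/var/www/epicyon/accounts'
    assert acct_dir(base_dir, 'alice', 'example.com') == \
        data_dir(base_dir) + '/alice@example.com'
    assert acct_handle_dir(base_dir, 'alice@example.com') == \
        acct_dir(base_dir, 'alice', 'example.com')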
def is_featured_writer(base_dir: str, nickname: str, domain: str) -> bool:
@ -590,7 +600,7 @@ def is_featured_writer(base_dir: str, nickname: str, domain: str) -> bool:
def refresh_newswire(base_dir: str):
"""Causes the newswire to be updates after a change to user accounts
"""
refresh_newswire_filename = base_dir + '/accounts/.refresh_newswire'
refresh_newswire_filename = data_dir(base_dir) + '/.refresh_newswire'
if os.path.isfile(refresh_newswire_filename):
return
try:
@ -750,7 +760,7 @@ def is_dormant(base_dir: str, nickname: str, domain: str, actor: str,
def is_editor(base_dir: str, nickname: str) -> bool:
"""Returns true if the given nickname is an editor
"""
editors_file = base_dir + '/accounts/editors.txt'
editors_file = data_dir(base_dir) + '/editors.txt'
if not os.path.isfile(editors_file):
admin_name = get_config_param(base_dir, 'admin')
@ -776,7 +786,7 @@ def is_editor(base_dir: str, nickname: str) -> bool:
def is_artist(base_dir: str, nickname: str) -> bool:
"""Returns true if the given nickname is an artist
"""
artists_file = base_dir + '/accounts/artists.txt'
artists_file = data_dir(base_dir) + '/artists.txt'
if not os.path.isfile(artists_file):
admin_name = get_config_param(base_dir, 'admin')
@ -986,7 +996,7 @@ def is_system_account(nickname: str) -> bool:
def get_memorials(base_dir: str) -> str:
"""Returns the nicknames for memorial accounts
"""
memorial_file = base_dir + '/accounts/memorial'
memorial_file = data_dir(base_dir) + '/memorial'
if not os.path.isfile(memorial_file):
return ''
@ -1013,7 +1023,7 @@ def set_memorials(base_dir: str, domain: str, memorial_str) -> None:
memorial_str = new_memorial_str
# save the accounts
memorial_file = base_dir + '/accounts/memorial'
memorial_file = data_dir(base_dir) + '/memorial'
try:
with open(memorial_file, 'w+', encoding='utf-8') as fp_memorial:
fp_memorial.write(memorial_str)
@ -1024,7 +1034,7 @@ def set_memorials(base_dir: str, domain: str, memorial_str) -> None:
def is_memorial_account(base_dir: str, nickname: str) -> bool:
"""Returns true if the given nickname is a memorial account
"""
memorial_file = base_dir + '/accounts/memorial'
memorial_file = data_dir(base_dir) + '/memorial'
if not os.path.isfile(memorial_file):
return False
memorial_list = []
@ -1085,7 +1095,7 @@ def is_suspended(base_dir: str, nickname: str) -> bool:
if nickname == admin_nickname:
return False
suspended_filename = base_dir + '/accounts/suspended.txt'
suspended_filename = data_dir(base_dir) + '/suspended.txt'
if os.path.isfile(suspended_filename):
with open(suspended_filename, 'r', encoding='utf-8') as susp_file:
lines = susp_file.readlines()
@ -1125,7 +1135,8 @@ def get_followers_of_person(base_dir: str,
handle_dir = acct_handle_dir(base_dir, handle)
if not os.path.isdir(handle_dir):
return followers
for subdir, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for subdir, dirs, _ in os.walk(dir_str):
for account in dirs:
filename = os.path.join(subdir, account) + '/' + follow_file
if account == handle or \
@ -1942,8 +1953,9 @@ def follow_person(base_dir: str, nickname: str, domain: str,
print('EX: follow_person unable to write ' +
unfollowed_filename)
if not os.path.isdir(base_dir + '/accounts'):
os.mkdir(base_dir + '/accounts')
dir_str = data_dir(base_dir)
if not os.path.isdir(dir_str):
os.mkdir(dir_str)
handle_to_follow = follow_nickname + '@' + follow_domain
if group_account:
handle_to_follow = '!' + handle_to_follow
@ -2019,7 +2031,7 @@ def locate_news_votes(base_dir: str, domain: str,
else:
post_url = post_url + '.json.votes'
account_dir = base_dir + '/accounts/news@' + domain + '/'
account_dir = data_dir(base_dir) + '/news@' + domain + '/'
post_filename = account_dir + 'outbox/' + post_url
if os.path.isfile(post_filename):
return post_filename
@ -2043,7 +2055,7 @@ def locate_news_arrival(base_dir: str, domain: str,
else:
post_url = post_url + '.json.arrived'
account_dir = base_dir + '/accounts/news@' + domain + '/'
account_dir = data_dir(base_dir) + '/news@' + domain + '/'
post_filename = account_dir + 'outbox/' + post_url
if os.path.isfile(post_filename):
with open(post_filename, 'r', encoding='utf-8') as arrival_file:
@ -2063,13 +2075,14 @@ def clear_from_post_caches(base_dir: str, recent_posts_cache: {},
to news will appear
"""
filename = '/postcache/' + post_id + '.html'
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for acct in dirs:
if '@' not in acct:
continue
if acct.startswith('inbox@') or acct.startswith('Actor@'):
continue
cache_dir = os.path.join(base_dir + '/accounts', acct)
cache_dir = os.path.join(dir_str, acct)
post_filename = cache_dir + filename
if os.path.isfile(post_filename):
try:
@ -2114,7 +2127,7 @@ def locate_post(base_dir: str, nickname: str, domain: str,
return post_filename
# check news posts
account_dir = base_dir + '/accounts/news' + '@' + domain + '/'
account_dir = data_dir(base_dir) + '/news' + '@' + domain + '/'
post_filename = account_dir + 'outbox/' + post_url
if os.path.isfile(post_filename):
return post_filename
@ -2265,7 +2278,7 @@ def remove_moderation_post_from_index(base_dir: str, post_url: str,
debug: bool) -> None:
"""Removes a url from the moderation index
"""
moderation_index_file = base_dir + '/accounts/moderation.txt'
moderation_index_file = data_dir(base_dir) + '/moderation.txt'
if not os.path.isfile(moderation_index_file):
return
post_id = remove_id_ending(post_url)
@ -2843,7 +2856,8 @@ def no_of_accounts(base_dir: str) -> bool:
"""Returns the number of accounts on the system
"""
account_ctr = 0
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for account in dirs:
if is_account_dir(account):
account_ctr += 1
@ -2857,12 +2871,13 @@ def no_of_active_accounts_monthly(base_dir: str, months: int) -> bool:
account_ctr = 0
curr_time = int(time.time())
month_seconds = int(60*60*24*30*months)
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for account in dirs:
if not is_account_dir(account):
continue
last_used_filename = \
base_dir + '/accounts/' + account + '/.lastUsed'
dir_str + '/' + account + '/.lastUsed'
if not os.path.isfile(last_used_filename):
continue
with open(last_used_filename, 'r',
@ -4358,13 +4373,14 @@ def load_account_timezones(base_dir: str) -> {}:
"""Returns a dictionary containing the preferred timezone for each account
"""
account_timezone = {}
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for acct in dirs:
if '@' not in acct:
continue
if acct.startswith('inbox@') or acct.startswith('Actor@'):
continue
acct_directory = os.path.join(base_dir + '/accounts', acct)
acct_directory = os.path.join(dir_str, acct)
tz_filename = acct_directory + '/timezone.txt'
if not os.path.isfile(tz_filename):
continue
@ -4382,14 +4398,14 @@ def load_bold_reading(base_dir: str) -> {}:
"""Returns a dictionary containing the bold reading status for each account
"""
bold_reading = {}
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for acct in dirs:
if '@' not in acct:
continue
if acct.startswith('inbox@') or acct.startswith('Actor@'):
continue
bold_reading_filename = \
base_dir + '/accounts/' + acct + '/.boldReading'
bold_reading_filename = dir_str + '/' + acct + '/.boldReading'
if os.path.isfile(bold_reading_filename):
nickname = acct.split('@')[0]
bold_reading[nickname] = True
@ -4401,14 +4417,14 @@ def load_hide_follows(base_dir: str) -> {}:
"""Returns a dictionary containing the hide follows status for each account
"""
hide_follows = {}
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for acct in dirs:
if '@' not in acct:
continue
if acct.startswith('inbox@') or acct.startswith('Actor@'):
continue
hide_follows_filename = \
base_dir + '/accounts/' + acct + '/.hideFollows'
hide_follows_filename = dir_str + '/' + acct + '/.hideFollows'
if os.path.isfile(hide_follows_filename):
nickname = acct.split('@')[0]
hide_follows[nickname] = True
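load_bold_reading and load_hide_follows differ only in the flag filename; a generalised sketch, where the helper name and parameter are illustrative:

    def load_account_flags(base_dir: str, flag_filename: str) -> {}:
        # illustrative: {nickname: True} for accounts carrying the flag file
        flags = {}
        dir_str = data_dir(base_dir)
        for _, dirs, _ in os.walk(dir_str):
            for acct in dirs:
                if '@' not in acct:
                    continue
                if acct.startswith('inbox@') or acct.startswith('Actor@'):
                    continue
                flag_file = dir_str + '/' + acct + '/' + flag_filename
                if os.path.isfile(flag_file):
                    flags[acct.split('@')[0]] = True
            break
        return flags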
@ -4682,7 +4698,8 @@ def load_min_images_for_accounts(base_dir: str) -> []:
be minimized by default
"""
min_images_for_accounts = []
for subdir, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for subdir, dirs, _ in os.walk(dir_str):
for account in dirs:
if not is_account_dir(account):
continue
@ -4726,7 +4743,8 @@ def load_reverse_timeline(base_dir: str) -> []:
see reversed timelines
"""
reverse_sequence = []
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for acct in dirs:
if not is_account_dir(acct):
continue
@ -4745,7 +4763,8 @@ def save_reverse_timeline(base_dir: str, reverse_sequence: []) -> []:
"""Saves flags for each user indicating whether they prefer to
see reversed timelines
"""
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for acct in dirs:
if not is_account_dir(acct):
continue
@ -5043,7 +5062,7 @@ def get_status_count(base_dir: str) -> int:
"""Get the total number of posts
"""
status_ctr = 0
accounts_dir = base_dir + '/accounts'
accounts_dir = data_dir(base_dir)
for _, dirs, _ in os.walk(accounts_dir):
for acct in dirs:
if not is_account_dir(acct):


@ -9,6 +9,7 @@ __module_group__ = "Web Interface"
import os
from shutil import copyfile
from utils import data_dir
from utils import get_config_param
from webapp_utils import html_header_with_website_markup
from webapp_utils import html_footer
@ -21,19 +22,19 @@ def html_about(base_dir: str, http_prefix: str,
"""Show the about screen
"""
admin_nickname = get_config_param(base_dir, 'admin')
if not os.path.isfile(base_dir + '/accounts/about.md'):
dir_str = data_dir(base_dir)
if not os.path.isfile(dir_str + '/about.md'):
copyfile(base_dir + '/default_about.md',
base_dir + '/accounts/about.md')
dir_str + '/about.md')
if os.path.isfile(base_dir + '/accounts/login-background-custom.jpg'):
if not os.path.isfile(base_dir + '/accounts/login-background.jpg'):
copyfile(base_dir + '/accounts/login-background-custom.jpg',
base_dir + '/accounts/login-background.jpg')
if os.path.isfile(dir_str + '/login-background-custom.jpg'):
if not os.path.isfile(dir_str + '/login-background.jpg'):
copyfile(dir_str + '/login-background-custom.jpg',
dir_str + '/login-background.jpg')
about_text = 'Information about this instance goes here.'
if os.path.isfile(base_dir + '/accounts/about.md'):
with open(base_dir + '/accounts/about.md', 'r',
encoding='utf-8') as fp_about:
if os.path.isfile(dir_str + '/about.md'):
with open(dir_str + '/about.md', 'r', encoding='utf-8') as fp_about:
about_text = markdown_to_html(fp_about.read())
about_form = ''
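The login-background handling here recurs on the manual, specification and terms-of-service screens further down; a condensed sketch of the shared pattern, with an illustrative helper name:

    def ensure_login_background(base_dir: str) -> None:
        # illustrative: publish a custom login background if one was supplied
        dir_str = data_dir(base_dir)
        custom_jpg = dir_str + '/login-background-custom.jpg'
        background_jpg = dir_str + '/login-background.jpg'
        if os.path.isfile(custom_jpg) and \
                not os.path.isfile(background_jpg):
            copyfile(custom_jpg, background_jpg)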


@ -8,6 +8,7 @@ __status__ = "Production"
__module_group__ = "Accessibility"
import os
from utils import data_dir
from utils import is_account_dir
from utils import load_json
from utils import get_config_param
@ -21,11 +22,12 @@ def load_access_keys_for_accounts(base_dir: str, key_shortcuts: {},
access_keys_template: {}) -> None:
"""Loads key shortcuts for each account
"""
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for acct in dirs:
if not is_account_dir(acct):
continue
account_dir = os.path.join(base_dir + '/accounts', acct)
account_dir = os.path.join(dir_str, acct)
access_keys_filename = account_dir + '/access_keys.json'
if not os.path.isfile(access_keys_filename):
continue


@ -8,6 +8,7 @@ __status__ = "Production"
__module_group__ = "Web Interface Columns"
import os
from utils import data_dir
from utils import get_config_param
from utils import get_nickname_from_actor
from utils import is_editor
@ -28,7 +29,7 @@ from shares import share_category_icon
def _links_exist(base_dir: str) -> bool:
"""Returns true if links have been created
"""
links_filename = base_dir + '/accounts/links.txt'
links_filename = data_dir(base_dir) + '/links.txt'
return os.path.isfile(links_filename)
@ -214,7 +215,7 @@ def get_left_column_content(base_dir: str, nickname: str, domain_full: str,
# flag used not to show the first separator
first_separator_added = False
links_filename = base_dir + '/accounts/links.txt'
links_filename = data_dir(base_dir) + '/links.txt'
links_file_contains_entries = False
links_list = None
if os.path.isfile(links_filename):
@ -490,7 +491,7 @@ def html_edit_links(translate: {}, base_dir: str, path: str,
edit_links_form += \
' </div>\n'
links_filename = base_dir + '/accounts/links.txt'
links_filename = data_dir(base_dir) + '/links.txt'
links_str = ''
if os.path.isfile(links_filename):
with open(links_filename, 'r', encoding='utf-8') as fp_links:
@ -515,7 +516,7 @@ def html_edit_links(translate: {}, base_dir: str, path: str,
admin_nickname = get_config_param(base_dir, 'admin')
if admin_nickname:
if nickname == admin_nickname:
about_filename = base_dir + '/accounts/about.md'
about_filename = data_dir(base_dir) + '/about.md'
about_str = ''
if os.path.isfile(about_filename):
with open(about_filename, 'r', encoding='utf-8') as fp_about:
@ -534,7 +535,7 @@ def html_edit_links(translate: {}, base_dir: str, path: str,
edit_links_form += \
'</div>'
tos_filename = base_dir + '/accounts/tos.md'
tos_filename = data_dir(base_dir) + '/tos.md'
tos_str = ''
if os.path.isfile(tos_filename):
with open(tos_filename, 'r', encoding='utf-8') as fp_tos:
@ -553,7 +554,7 @@ def html_edit_links(translate: {}, base_dir: str, path: str,
edit_links_form += \
'</div>'
specification_filename = base_dir + '/accounts/activitypub.md'
specification_filename = data_dir(base_dir) + '/activitypub.md'
specification_str = ''
if os.path.isfile(specification_filename):
with open(specification_filename, 'r',


@ -10,6 +10,7 @@ __module_group__ = "Web Interface Columns"
import os
from content import remove_long_words
from content import limit_repeated_words
from utils import data_dir
from utils import get_image_extensions
from utils import get_fav_filename_from_url
from utils import get_base_content_from_post
@ -131,7 +132,8 @@ def get_right_column_content(base_dir: str, nickname: str, domain_full: str,
# show the edit icon
if editor:
if os.path.isfile(base_dir + '/accounts/newswiremoderation.txt'):
dir_str = data_dir(base_dir)
if os.path.isfile(dir_str + '/newswiremoderation.txt'):
# show the edit icon highlighted
html_str += \
' <a href="' + \
@ -606,7 +608,7 @@ def html_edit_newswire(translate: {}, base_dir: str, path: str,
edit_newswire_form += \
' </div>\n'
newswire_filename = base_dir + '/accounts/newswire.txt'
newswire_filename = data_dir(base_dir) + '/newswire.txt'
newswire_str = ''
if os.path.isfile(newswire_filename):
with open(newswire_filename, 'r', encoding='utf-8') as fp_news:
@ -629,7 +631,7 @@ def html_edit_newswire(translate: {}, base_dir: str, path: str,
filter_str = ''
filter_filename = \
base_dir + '/accounts/news@' + domain + '/filters.txt'
data_dir(base_dir) + '/news@' + domain + '/filters.txt'
if os.path.isfile(filter_filename):
with open(filter_filename, 'r', encoding='utf-8') as filterfile:
filter_str = filterfile.read()
@ -660,8 +662,7 @@ def html_edit_newswire(translate: {}, base_dir: str, path: str,
'spellcheck="true">' + dogwhistle_str + '</textarea>\n'
hashtag_rules_str = ''
hashtag_rules_filename = \
base_dir + '/accounts/hashtagrules.txt'
hashtag_rules_filename = data_dir(base_dir) + '/hashtagrules.txt'
if os.path.isfile(hashtag_rules_filename):
with open(hashtag_rules_filename, 'r', encoding='utf-8') as rulesfile:
hashtag_rules_str = rulesfile.read()


@ -9,6 +9,7 @@ __module_group__ = "Web Interface"
import os
from shutil import copyfile
from utils import data_dir
from utils import get_full_domain
from utils import get_nickname_from_actor
from utils import get_domain_from_actor
@ -212,10 +213,11 @@ def html_confirm_follow(translate: {}, base_dir: str,
"""
follow_domain, _ = get_domain_from_actor(follow_actor)
if os.path.isfile(base_dir + '/accounts/follow-background-custom.jpg'):
if not os.path.isfile(base_dir + '/accounts/follow-background.jpg'):
copyfile(base_dir + '/accounts/follow-background-custom.jpg',
base_dir + '/accounts/follow-background.jpg')
dir_str = data_dir(base_dir)
if os.path.isfile(dir_str + '/follow-background-custom.jpg'):
if not os.path.isfile(dir_str + '/follow-background.jpg'):
copyfile(dir_str + '/follow-background-custom.jpg',
dir_str + '/follow-background.jpg')
css_filename = base_dir + '/epicyon-follow.css'
if os.path.isfile(base_dir + '/follow.css'):
@ -270,10 +272,11 @@ def html_confirm_unfollow(translate: {}, base_dir: str,
"""
follow_domain, _ = get_domain_from_actor(follow_actor)
if os.path.isfile(base_dir + '/accounts/follow-background-custom.jpg'):
if not os.path.isfile(base_dir + '/accounts/follow-background.jpg'):
copyfile(base_dir + '/accounts/follow-background-custom.jpg',
base_dir + '/accounts/follow-background.jpg')
dir_str = data_dir(base_dir)
if os.path.isfile(dir_str + '/follow-background-custom.jpg'):
if not os.path.isfile(dir_str + '/follow-background.jpg'):
copyfile(dir_str + '/follow-background-custom.jpg',
dir_str + '/follow-background.jpg')
css_filename = base_dir + '/epicyon-follow.css'
if os.path.isfile(base_dir + '/follow.css'):


@ -8,6 +8,7 @@ __status__ = "Production"
__module_group__ = "Web Interface"
import os
from utils import data_dir
from utils import is_premium_account
from utils import dangerous_markup
from utils import remove_html
@ -510,8 +511,9 @@ def html_new_post(edit_post_params: {},
'<h1>' + translate['Write your report below.'] + '</h1>\n'
# custom report header with any additional instructions
if os.path.isfile(base_dir + '/accounts/report.txt'):
with open(base_dir + '/accounts/report.txt', 'r',
dir_str = data_dir(base_dir)
if os.path.isfile(dir_str + '/report.txt'):
with open(dir_str + '/report.txt', 'r',
encoding='utf-8') as file:
custom_report_text = file.read()
if '</p>' not in custom_report_text:
@ -548,11 +550,10 @@ def html_new_post(edit_post_params: {},
translate['Enter the choices for your question below.'] + \
'</h1>\n'
if os.path.isfile(base_dir + '/accounts/newpost.txt'):
with open(base_dir + '/accounts/newpost.txt', 'r',
encoding='utf-8') as file:
new_post_text = \
'<p>' + file.read() + '</p>\n'
dir_str = data_dir(base_dir)
if os.path.isfile(dir_str + '/newpost.txt'):
with open(dir_str + '/newpost.txt', 'r', encoding='utf-8') as file:
new_post_text = '<p>' + file.read() + '</p>\n'
css_filename = base_dir + '/epicyon-profile.css'
if os.path.isfile(base_dir + '/epicyon.css'):


@ -9,6 +9,7 @@ __module_group__ = "Web Interface"
import os
from datetime import datetime, timezone
from utils import data_dir
from utils import get_nickname_from_actor
from utils import get_config_param
from utils import escape_text
@ -82,7 +83,7 @@ def html_hash_tag_swarm(base_dir: str, actor: str, translate: {}) -> str:
# Load the blocked hashtags into memory.
# This avoids needing to repeatedly load the blocked file for each hashtag
blocked_str = ''
global_blocking_filename = base_dir + '/accounts/blocking.txt'
global_blocking_filename = data_dir(base_dir) + '/blocking.txt'
if os.path.isfile(global_blocking_filename):
with open(global_blocking_filename, 'r',
encoding='utf-8') as fp_block:


@ -11,6 +11,7 @@ import os
import time
import filecmp
from shutil import copyfile
from utils import data_dir
from utils import get_image_extensions
from utils import get_config_param
from utils import no_of_accounts
@ -85,7 +86,7 @@ def html_login(translate: {},
'/' + login_image
if os.path.isfile(theme_image):
login_image_filename = \
base_dir + '/accounts/' + login_image
data_dir(base_dir) + '/' + login_image
if os.path.isfile(login_image_filename):
if not filecmp.cmp(theme_image,
login_image_filename):
@ -98,7 +99,7 @@ def html_login(translate: {},
if not login_image_filename:
for ext in extensions:
login_image = 'login.' + ext
image_filename = base_dir + '/accounts/' + login_image
image_filename = data_dir(base_dir) + '/' + login_image
if os.path.isfile(image_filename):
login_image_filename = image_filename
break
@ -106,7 +107,7 @@ def html_login(translate: {},
# no login image found, so use the default
if not login_image_filename:
login_image = 'login.png'
login_image_filename = base_dir + '/accounts/' + login_image
login_image_filename = data_dir(base_dir) + '/' + login_image
source_image = base_dir + '/img/login.png'
copyfile(source_image, login_image_filename)
@ -129,10 +130,10 @@ def html_login(translate: {},
'<p class="login-text">' + \
translate['You will become the admin of this site.'] + \
'</p>'
if os.path.isfile(base_dir + '/accounts/login.txt'):
dir_str = data_dir(base_dir)
if os.path.isfile(dir_str + '/login.txt'):
# custom login message
with open(base_dir + '/accounts/login.txt', 'r',
encoding='utf-8') as file:
with open(dir_str + '/login.txt', 'r', encoding='utf-8') as file:
login_text = '<p class="login-text">' + file.read() + '</p>'
css_filename = base_dir + '/epicyon-login.css'


@ -9,6 +9,7 @@ __module_group__ = "Web Interface"
import os
from shutil import copyfile
from utils import data_dir
from utils import get_config_param
from webapp_utils import html_header_with_website_markup
from webapp_utils import html_footer
@ -23,13 +24,14 @@ def html_manual(base_dir: str, http_prefix: str,
"""
manual_filename = base_dir + '/manual/manual.md'
admin_nickname = get_config_param(base_dir, 'admin')
if os.path.isfile(base_dir + '/accounts/manual.md'):
manual_filename = base_dir + '/accounts/manual.md'
dir_str = data_dir(base_dir)
if os.path.isfile(dir_str + '/manual.md'):
manual_filename = dir_str + '/manual.md'
if os.path.isfile(base_dir + '/accounts/login-background-custom.jpg'):
if not os.path.isfile(base_dir + '/accounts/login-background.jpg'):
copyfile(base_dir + '/accounts/login-background-custom.jpg',
base_dir + '/accounts/login-background.jpg')
if os.path.isfile(dir_str + '/login-background-custom.jpg'):
if not os.path.isfile(dir_str + '/login-background.jpg'):
copyfile(dir_str + '/login-background-custom.jpg',
dir_str + '/login-background.jpg')
manual_text = 'User Manual.'
if os.path.isfile(manual_filename):


@ -8,6 +8,7 @@ __status__ = "Production"
__module_group__ = "Timeline"
import os
from utils import data_dir
from utils import string_ends_with
from utils import valid_url_prefix
@ -16,7 +17,7 @@ def load_peertube_instances(base_dir: str, peertube_instances: []) -> None:
"""Loads peertube instances from file into the given list
"""
peertube_list = None
peertube_instances_filename = base_dir + '/accounts/peertube.txt'
peertube_instances_filename = data_dir(base_dir) + '/peertube.txt'
if os.path.isfile(peertube_instances_filename):
with open(peertube_instances_filename, 'r',
encoding='utf-8') as fp_inst:

View File

@ -8,6 +8,7 @@ __status__ = "Production"
__module_group__ = "Moderation"
import os
from utils import data_dir
from utils import get_url_from_post
from utils import remove_html
from utils import is_artist
@ -361,7 +362,8 @@ def html_moderation_info(translate: {}, base_dir: str,
info_shown = False
accounts = []
for _, dirs, _ in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, _ in os.walk(dir_str):
for acct in dirs:
if not is_account_dir(acct):
continue
@ -382,9 +384,10 @@ def html_moderation_info(translate: {}, base_dir: str,
info_form += '<tr>\n'
col = 0
dir_str = data_dir(base_dir)
for acct in accounts:
acct_nickname = acct.split('@')[0]
account_dir = os.path.join(base_dir + '/accounts', acct)
account_dir = os.path.join(dir_str, acct)
actor_json = load_json(account_dir + '.json')
if not actor_json:
continue
@ -420,7 +423,7 @@ def html_moderation_info(translate: {}, base_dir: str,
if len(accounts) > 10:
info_form += '</details>\n'
suspended_filename = base_dir + '/accounts/suspended.txt'
suspended_filename = dir_str + '/suspended.txt'
if os.path.isfile(suspended_filename):
with open(suspended_filename, 'r', encoding='utf-8') as fp_sus:
suspended_str = fp_sus.read()
@ -436,10 +439,9 @@ def html_moderation_info(translate: {}, base_dir: str,
info_form += '</div>\n'
info_shown = True
blocking_filename = base_dir + '/accounts/blocking.txt'
blocking_filename = dir_str + '/blocking.txt'
if os.path.isfile(blocking_filename):
blocking_reasons_filename = \
base_dir + '/accounts/blocking_reasons.txt'
blocking_reasons_filename = dir_str + '/blocking_reasons.txt'
blocking_reasons_exist = False
if os.path.isfile(blocking_reasons_filename):
blocking_reasons_exist = True
@ -475,7 +477,7 @@ def html_moderation_info(translate: {}, base_dir: str,
info_form += '</div>\n'
info_shown = True
filters_filename = base_dir + '/accounts/filters.txt'
filters_filename = dir_str + '/filters.txt'
if os.path.isfile(filters_filename):
with open(filters_filename, 'r', encoding='utf-8') as fp_filt:
filtered_str = fp_filt.read()


@ -12,6 +12,7 @@ from shutil import copyfile
from petnames import get_pet_name
from person import is_person_snoozed
from posts import is_moderator
from utils import data_dir
from utils import quote_toots_allowed
from utils import get_full_domain
from utils import get_config_param
@ -177,10 +178,11 @@ def html_person_options(default_timeline: str,
return None
options_domain_full = get_full_domain(options_domain, options_port)
if os.path.isfile(base_dir + '/accounts/options-background-custom.jpg'):
if not os.path.isfile(base_dir + '/accounts/options-background.jpg'):
copyfile(base_dir + '/accounts/options-background.jpg',
base_dir + '/accounts/options-background.jpg')
dir_str = data_dir(base_dir)
if os.path.isfile(dir_str + '/options-background-custom.jpg'):
if not os.path.isfile(dir_str + '/options-background.jpg'):
copyfile(dir_str + '/options-background-custom.jpg',
dir_str + '/options-background.jpg')
dormant = False
offline = False
@ -558,7 +560,7 @@ def html_person_options(default_timeline: str,
(is_moderator(base_dir, nickname) and
not is_moderator(base_dir, options_nickname))):
newswire_blocked_filename = \
base_dir + '/accounts/' + \
dir_str + '/' + \
options_nickname + '@' + options_domain + \
'/.nonewswire'
checkbox_str = \
@ -579,7 +581,7 @@ def html_person_options(default_timeline: str,
# the newswire
if newswire_posts_permitted:
moderated_filename = \
base_dir + '/accounts/' + \
dir_str + '/' + \
options_nickname + '@' + \
options_domain + '/.newswiremoderated'
checkbox_str = \


@ -12,6 +12,7 @@ import html
import datetime
import urllib.parse
from shutil import copyfile
from utils import data_dir
from utils import get_url_from_post
from utils import get_config_param
from utils import remove_html
@ -310,10 +311,11 @@ def html_podcast_episode(translate: {},
if os.path.isfile(base_dir + '/podcast.css'):
css_filename = base_dir + '/podcast.css'
if os.path.isfile(base_dir + '/accounts/podcast-background-custom.jpg'):
if not os.path.isfile(base_dir + '/accounts/podcast-background.jpg'):
copyfile(base_dir + '/accounts/podcast-background.jpg',
base_dir + '/accounts/podcast-background.jpg')
dir_str = data_dir(base_dir)
if os.path.isfile(dir_str + '/podcast-background-custom.jpg'):
if not os.path.isfile(dir_str + '/podcast-background.jpg'):
copyfile(dir_str + '/podcast-background-custom.jpg',
dir_str + '/podcast-background.jpg')
instance_title = get_config_param(base_dir, 'instanceTitle')
podcast_str = \


@ -24,6 +24,7 @@ from posts import post_is_muted
from posts import get_person_box
from posts import download_announce
from posts import populate_replies_json
from utils import data_dir
from utils import get_quote_toot_url
from utils import get_post_attachments
from utils import get_url_from_post
@ -1905,7 +1906,7 @@ def _substitute_onion_domains(base_dir: str, content: str) -> str:
"twitter.com": twitter_onion
}
onion_domains_filename = base_dir + '/accounts/onion_domains.txt'
onion_domains_filename = data_dir(base_dir) + '/onion_domains.txt'
if os.path.isfile(onion_domains_filename):
onion_domains_list = []
try:


@ -10,6 +10,7 @@ __module_group__ = "Web Interface"
import os
from pprint import pprint
from webfinger import webfinger_handle
from utils import data_dir
from utils import is_premium_account
from utils import time_days_ago
from utils import uninvert_text
@ -960,7 +961,7 @@ def html_profile(signing_priv_key_pem: str,
"""
show_moved_accounts = False
if authorized:
moved_accounts_filename = base_dir + '/accounts/actors_moved.txt'
moved_accounts_filename = data_dir(base_dir) + '/actors_moved.txt'
if os.path.isfile(moved_accounts_filename):
show_moved_accounts = True
@ -1826,7 +1827,8 @@ def _html_profile_shares(actor: str, translate: {},
def _grayscale_enabled(base_dir: str) -> bool:
"""Is grayscale UI enabled?
"""
return os.path.isfile(base_dir + '/accounts/.grayscale')
dir_str = data_dir(base_dir)
return os.path.isfile(dir_str + '/.grayscale')
def _html_themes_dropdown(base_dir: str, translate: {}) -> str:
@ -2051,7 +2053,7 @@ def _html_edit_profile_instance(base_dir: str, translate: {},
# site moderators
moderators = ''
moderators_file = base_dir + '/accounts/moderators.txt'
moderators_file = data_dir(base_dir) + '/moderators.txt'
if os.path.isfile(moderators_file):
with open(moderators_file, 'r', encoding='utf-8') as mod_file:
moderators = mod_file.read()
@ -2061,7 +2063,7 @@ def _html_edit_profile_instance(base_dir: str, translate: {},
'moderators', moderators, 200, '', False)
# site editors
editors = ''
editors_file = base_dir + '/accounts/editors.txt'
editors_file = data_dir(base_dir) + '/editors.txt'
if os.path.isfile(editors_file):
with open(editors_file, 'r', encoding='utf-8') as edit_file:
editors = edit_file.read()
@ -2072,7 +2074,7 @@ def _html_edit_profile_instance(base_dir: str, translate: {},
# counselors
counselors = ''
counselors_file = base_dir + '/accounts/counselors.txt'
counselors_file = data_dir(base_dir) + '/counselors.txt'
if os.path.isfile(counselors_file):
with open(counselors_file, 'r', encoding='utf-8') as co_file:
counselors = co_file.read()
@ -2082,7 +2084,7 @@ def _html_edit_profile_instance(base_dir: str, translate: {},
# artists
artists = ''
artists_file = base_dir + '/accounts/artists.txt'
artists_file = data_dir(base_dir) + '/artists.txt'
if os.path.isfile(artists_file):
with open(artists_file, 'r', encoding='utf-8') as art_file:
artists = art_file.read()
@ -2092,7 +2094,7 @@ def _html_edit_profile_instance(base_dir: str, translate: {},
# site devops
devops = ''
devops_file = base_dir + '/accounts/devops.txt'
devops_file = data_dir(base_dir) + '/devops.txt'
if os.path.isfile(devops_file):
with open(devops_file, 'r', encoding='utf-8') as edit_file:
devops = edit_file.read()


@ -10,6 +10,7 @@ __module_group__ = "Web Interface"
import os
from shutil import copyfile
import urllib.parse
from utils import data_dir
from utils import get_post_attachments
from utils import get_url_from_post
from utils import date_from_string_format
@ -306,7 +307,8 @@ def html_search_shared_items(translate: {},
'<center><h1>' + \
'<a href="' + actor + '/search">' + title_str + '</a></h1></center>'
results_exist = False
for _, dirs, files in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, files in os.walk(dir_str):
for handle in dirs:
if not is_account_dir(handle):
continue
@ -547,7 +549,8 @@ def html_skills_search(actor: str, translate: {}, base_dir: str,
results = []
# search instance accounts
for subdir, _, files in os.walk(base_dir + '/accounts/'):
dir_str = data_dir(base_dir)
for subdir, _, files in os.walk(dir_str + '/'):
for fname in files:
if not fname.endswith('.json'):
continue


@ -9,6 +9,7 @@ __module_group__ = "Web Interface"
import os
from shutil import copyfile
from utils import data_dir
from utils import get_config_param
from webapp_utils import html_header_with_website_markup
from webapp_utils import html_footer
@ -23,13 +24,14 @@ def html_specification(base_dir: str, http_prefix: str,
"""
specification_filename = base_dir + '/specification/activitypub.md'
admin_nickname = get_config_param(base_dir, 'admin')
if os.path.isfile(base_dir + '/accounts/activitypub.md'):
specification_filename = base_dir + '/accounts/activitypub.md'
dir_str = data_dir(base_dir)
if os.path.isfile(dir_str + '/activitypub.md'):
specification_filename = dir_str + '/activitypub.md'
if os.path.isfile(base_dir + '/accounts/login-background-custom.jpg'):
if not os.path.isfile(base_dir + '/accounts/login-background.jpg'):
copyfile(base_dir + '/accounts/login-background-custom.jpg',
base_dir + '/accounts/login-background.jpg')
if os.path.isfile(dir_str + '/login-background-custom.jpg'):
if not os.path.isfile(dir_str + '/login-background.jpg'):
copyfile(dir_str + '/login-background-custom.jpg',
dir_str + '/login-background.jpg')
specification_text = 'ActivityPub Protocol Specification.'
if os.path.isfile(specification_filename):

View File

@ -8,6 +8,7 @@ __status__ = "Production"
__module_group__ = "Web Interface"
import os
from utils import data_dir
from utils import load_json
from utils import get_config_param
from webapp_utils import html_header_with_external_style
@ -178,7 +179,7 @@ def html_theme_designer(base_dir: str,
theme_json = load_json(theme_filename)
# set custom theme parameters
custom_variables_file = base_dir + '/accounts/theme.json'
custom_variables_file = data_dir(base_dir) + '/theme.json'
if os.path.isfile(custom_variables_file):
custom_theme_params = load_json(custom_variables_file, 0)
if custom_theme_params:

View File

@ -10,6 +10,7 @@ __module_group__ = "Timeline"
import os
import time
from shutil import copyfile
from utils import data_dir
from utils import is_artist
from utils import dangerous_markup
from utils import get_config_param
@ -60,7 +61,7 @@ def _get_help_for_timeline(base_dir: str, box_name: str) -> str:
"""Shows help text for the given timeline
"""
# get the filename for help for this timeline
help_filename = base_dir + '/accounts/help_' + box_name + '.md'
help_filename = data_dir(base_dir) + '/help_' + box_name + '.md'
if not os.path.isfile(help_filename):
language = \
get_config_param(base_dir, 'language')

View File

@ -9,6 +9,7 @@ __module_group__ = "Web Interface"
import os
from shutil import copyfile
from utils import data_dir
from utils import get_config_param
from utils import local_actor_url
from webapp_utils import html_header_with_external_style
@ -21,19 +22,19 @@ def html_terms_of_service(base_dir: str,
"""Show the terms of service screen
"""
admin_nickname = get_config_param(base_dir, 'admin')
if not os.path.isfile(base_dir + '/accounts/tos.md'):
dir_str = data_dir(base_dir)
if not os.path.isfile(dir_str + '/tos.md'):
copyfile(base_dir + '/default_tos.md',
base_dir + '/accounts/tos.md')
dir_str + '/tos.md')
if os.path.isfile(base_dir + '/accounts/login-background-custom.jpg'):
if not os.path.isfile(base_dir + '/accounts/login-background.jpg'):
copyfile(base_dir + '/accounts/login-background-custom.jpg',
base_dir + '/accounts/login-background.jpg')
if os.path.isfile(dir_str + '/login-background-custom.jpg'):
if not os.path.isfile(dir_str + '/login-background.jpg'):
copyfile(dir_str + '/login-background-custom.jpg',
dir_str + '/login-background.jpg')
tos_text = 'Terms of Service go here.'
if os.path.isfile(base_dir + '/accounts/tos.md'):
with open(base_dir + '/accounts/tos.md', 'r',
encoding='utf-8') as file:
if os.path.isfile(dir_str + '/tos.md'):
with open(dir_str + '/tos.md', 'r', encoding='utf-8') as file:
tos_text = markdown_to_html(file.read())
tos_form = ''

View File

@ -12,6 +12,7 @@ from shutil import copyfile
from collections import OrderedDict
from session import get_json
from session import get_json_valid
from utils import data_dir
from utils import string_contains
from utils import get_post_attachments
from utils import image_mime_types_dict
@ -471,7 +472,8 @@ def shares_timeline_json(actor: str, page_number: int, items_per_page: int,
by sharing a large number of things
"""
all_shares_json = {}
for _, dirs, files in os.walk(base_dir + '/accounts'):
dir_str = data_dir(base_dir)
for _, dirs, files in os.walk(dir_str):
for handle in dirs:
if not is_account_dir(handle):
continue
@ -2093,10 +2095,11 @@ def set_custom_background(base_dir: str, background: str,
if os.path.isfile(base_dir + '/img/' + background + '.' + ext):
if not new_background:
new_background = background
if not os.path.isfile(base_dir + '/accounts/' +
dir_str = data_dir(base_dir)
if not os.path.isfile(dir_str + '/' +
new_background + '.' + ext):
copyfile(base_dir + '/img/' + background + '.' + ext,
base_dir + '/accounts/' + new_background + '.' + ext)
dir_str + '/' + new_background + '.' + ext)
return ext
return None
@ -2109,7 +2112,7 @@ def html_common_emoji(base_dir: str, no_of_emoji: int) -> str:
emojis_filename = base_dir + '/emoji/default_emoji.json'
emojis_json = load_json(emojis_filename)
common_emoji_filename = base_dir + '/accounts/common_emoji.txt'
common_emoji_filename = data_dir(base_dir) + '/common_emoji.txt'
if not os.path.isfile(common_emoji_filename):
return ''
common_emoji = None
@ -2365,7 +2368,7 @@ def get_buy_links(post_json_object: str, translate: {}, buy_sites: {}) -> {}:
def load_buy_sites(base_dir: str) -> {}:
"""Loads domains from which buying is permitted
"""
buy_sites_filename = base_dir + '/accounts/buy_sites.json'
buy_sites_filename = data_dir(base_dir) + '/buy_sites.json'
if os.path.isfile(buy_sites_filename):
buy_sites_json = load_json(buy_sites_filename)
if buy_sites_json:

View File

@ -9,6 +9,7 @@ __module_group__ = "Onboarding"
import os
from shutil import copyfile
from utils import data_dir
from utils import get_config_param
from utils import remove_html
from utils import acct_dir
@ -51,13 +52,14 @@ def html_welcome_screen(base_dir: str, nickname: str,
"""Returns the welcome screen
"""
# set a custom background for the welcome screen
if os.path.isfile(base_dir + '/accounts/welcome-background-custom.jpg'):
if not os.path.isfile(base_dir + '/accounts/welcome-background.jpg'):
copyfile(base_dir + '/accounts/welcome-background-custom.jpg',
base_dir + '/accounts/welcome-background.jpg')
dir_str = data_dir(base_dir)
if os.path.isfile(dir_str + '/welcome-background-custom.jpg'):
if not os.path.isfile(dir_str + '/welcome-background.jpg'):
copyfile(dir_str + '/welcome-background-custom.jpg',
dir_str + '/welcome-background.jpg')
welcome_text = 'Welcome to Epicyon'
welcome_filename = base_dir + '/accounts/' + curr_screen + '.md'
welcome_filename = dir_str + '/' + curr_screen + '.md'
if not os.path.isfile(welcome_filename):
default_filename = None
if theme_name:

View File

@ -9,6 +9,7 @@ __module_group__ = "Onboarding"
import os
from shutil import copyfile
from utils import data_dir
from utils import remove_html
from utils import get_config_param
from webapp_utils import html_header_with_external_style
@ -22,13 +23,14 @@ def html_welcome_final(base_dir: str, nickname: str,
"""Returns the final welcome screen after first login
"""
# set a custom background for the welcome screen
if os.path.isfile(base_dir + '/accounts/welcome-background-custom.jpg'):
if not os.path.isfile(base_dir + '/accounts/welcome-background.jpg'):
copyfile(base_dir + '/accounts/welcome-background-custom.jpg',
base_dir + '/accounts/welcome-background.jpg')
dir_str = data_dir(base_dir)
if os.path.isfile(dir_str + '/welcome-background-custom.jpg'):
if not os.path.isfile(dir_str + '/welcome-background.jpg'):
copyfile(dir_str + '/welcome-background-custom.jpg',
dir_str + '/welcome-background.jpg')
final_text = 'Welcome to Epicyon'
final_filename = base_dir + '/accounts/welcome_final.md'
final_filename = dir_str + '/welcome_final.md'
if not os.path.isfile(final_filename):
default_filename = None
if theme_name:

View File

@ -9,6 +9,7 @@ __module_group__ = "Onboarding"
import os
from shutil import copyfile
from utils import data_dir
from utils import remove_html
from utils import load_json
from utils import get_config_param
@ -29,13 +30,14 @@ def html_welcome_profile(base_dir: str, nickname: str, domain: str,
"""Returns the welcome profile screen to set avatar and bio
"""
# set a custom background for the welcome screen
if os.path.isfile(base_dir + '/accounts/welcome-background-custom.jpg'):
if not os.path.isfile(base_dir + '/accounts/welcome-background.jpg'):
copyfile(base_dir + '/accounts/welcome-background-custom.jpg',
base_dir + '/accounts/welcome-background.jpg')
dir_str = data_dir(base_dir)
if os.path.isfile(dir_str + '/welcome-background-custom.jpg'):
if not os.path.isfile(dir_str + '/welcome-background.jpg'):
copyfile(dir_str + '/welcome-background-custom.jpg',
dir_str + '/welcome-background.jpg')
profile_text = 'Welcome to Epicyon'
profile_filename = base_dir + '/accounts/welcome_profile.md'
profile_filename = dir_str + '/welcome_profile.md'
if not os.path.isfile(profile_filename):
default_filename = None
if theme_name:
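
The three welcome-screen hunks above repeat the same copy-if-missing logic for the custom background image. As a sketch of a possible follow-up (not part of this commit, and with a hypothetical helper name), that step could be factored into one place:

import os
from shutil import copyfile
from utils import data_dir

def ensure_welcome_background(base_dir: str) -> None:
    """Copies a custom welcome background into place if one is present"""
    dir_str = data_dir(base_dir)
    custom = dir_str + '/welcome-background-custom.jpg'
    background = dir_str + '/welcome-background.jpg'
    if os.path.isfile(custom) and not os.path.isfile(background):
        copyfile(custom, background)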