+ article_added = True
# get the handle of the author
- if postJsonObject['object'].get('attributedTo'):
- authorNickname = None
- if isinstance(postJsonObject['object']['attributedTo'], str):
- actor = postJsonObject['object']['attributedTo']
- authorNickname = getNicknameFromActor(actor)
- if authorNickname:
- authorDomain, authorPort = getDomainFromActor(actor)
- if authorDomain:
+ if post_json_object['object'].get('attributedTo'):
+ author_nickname = None
+ if isinstance(post_json_object['object']['attributedTo'], str):
+ actor = post_json_object['object']['attributedTo']
+ author_nickname = get_nickname_from_actor(actor)
+ if author_nickname:
+ author_domain, _ = get_domain_from_actor(actor)
+ if author_domain:
# author must be from the given domain
- if restrictToDomain and authorDomain != domain:
+ if restrict_to_domain and author_domain != domain:
return ''
- handle = authorNickname + '@' + authorDomain
+ handle = author_nickname + '@' + author_domain
else:
# posts from the domain are expected to have an attributedTo field
- if restrictToDomain:
+ if restrict_to_domain:
return ''
- if postJsonObject['object'].get('published'):
- if 'T' in postJsonObject['object']['published']:
- blogStr += '
' + \
- postJsonObject['object']['published'].split('T')[0]
+ if post_json_object['object'].get('published'):
+ if 'T' in post_json_object['object']['published']:
+ blog_str += '
'
- instanceTitle = \
- getConfigParam(baseDir, 'instanceTitle')
- editBlogForm = htmlHeaderWithExternalStyle(cssFilename, instanceTitle)
+ instance_title = \
+ get_config_param(base_dir, 'instanceTitle')
+ edit_blog_form = \
+ html_header_with_external_style(css_filename, instance_title, None)
- editBlogForm += \
+ edit_blog_form += \
''
- editBlogForm = editBlogForm.replace('',
- '')
-
- editBlogForm += htmlFooter()
- return editBlogForm
+ edit_blog_form += html_footer()
+ return edit_blog_form
-def pathContainsBlogLink(baseDir: str,
- httpPrefix: str, domain: str,
- domainFull: str, path: str) -> (str, str):
+def path_contains_blog_link(base_dir: str,
+ http_prefix: str, domain: str,
+ domain_full: str, path: str) -> (str, str):
"""If the path contains a blog entry then return its filename
"""
if '/users/' not in path:
return None, None
- userEnding = path.split('/users/', 1)[1]
- if '/' not in userEnding:
+ user_ending = path.split('/users/', 1)[1]
+ if '/' not in user_ending:
return None, None
- userEnding2 = userEnding.split('/')
- nickname = userEnding2[0]
- if len(userEnding2) != 2:
+ user_ending2 = user_ending.split('/')
+ nickname = user_ending2[0]
+ if len(user_ending2) != 2:
return None, None
- if len(userEnding2[1]) < 14:
+ if len(user_ending2[1]) < 14:
return None, None
- userEnding2[1] = userEnding2[1].strip()
- if not userEnding2[1].isdigit():
+ user_ending2[1] = user_ending2[1].strip()
+ if not user_ending2[1].isdigit():
return None, None
# check for blog posts
- blogIndexFilename = acctDir(baseDir, nickname, domain) + '/tlblogs.index'
- if not os.path.isfile(blogIndexFilename):
+ blog_index_filename = \
+ acct_dir(base_dir, nickname, domain) + '/tlblogs.index'
+ if not os.path.isfile(blog_index_filename):
return None, None
- if '#' + userEnding2[1] + '.' not in open(blogIndexFilename).read():
+ if not text_in_file('#' + user_ending2[1] + '.', blog_index_filename):
return None, None
- messageId = localActorUrl(httpPrefix, nickname, domainFull) + \
- '/statuses/' + userEnding2[1]
- return locatePost(baseDir, nickname, domain, messageId), nickname
+ message_id = local_actor_url(http_prefix, nickname, domain_full) + \
+ '/statuses/' + user_ending2[1]
+ return locate_post(base_dir, nickname, domain, message_id), nickname
-def getBlogAddress(actorJson: {}) -> str:
+def get_blog_address(actor_json: {}) -> str:
"""Returns blog address for the given actor
"""
- return getActorPropertyUrl(actorJson, 'Blog')
+ return get_actor_property_url(actor_json, 'Blog')
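The path parsing in path_contains_blog_link above encodes a fixed URL shape: /users/<nickname>/<status id>, where the status id is at least 14 digits. A minimal sketch of just those rules under a hypothetical helper name (the tlblogs.index lookup and locate_post call are omitted):

def looks_like_blog_path(path: str) -> bool:
    """True if path has the form /users/<nickname>/<status id>
    with a status id of at least 14 digits (illustrative helper)"""
    if '/users/' not in path:
        return False
    user_ending = path.split('/users/', 1)[1]
    parts = user_ending.split('/')
    if len(parts) != 2:
        return False
    status_id = parts[1].strip()
    return len(status_id) >= 14 and status_id.isdigit()


assert looks_like_blog_path('/users/alice/20210804123456')
assert not looks_like_blog_path('/users/alice/about')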
diff --git a/bookmarks.py b/bookmarks.py
index fb0c3f769..b14453d90 100644
--- a/bookmarks.py
+++ b/bookmarks.py
@@ -1,7 +1,7 @@
__filename__ = "bookmarks.py"
__author__ = "Bob Mottram"
__license__ = "AGPL3+"
-__version__ = "1.2.0"
+__version__ = "1.3.0"
__maintainer__ = "Bob Mottram"
__email__ = "bob@libreserver.org"
__status__ = "Production"
@@ -9,649 +9,662 @@ __module_group__ = "Timeline"
import os
from pprint import pprint
-from webfinger import webfingerHandle
-from auth import createBasicAuthHeader
-from utils import removeDomainPort
-from utils import hasUsersPath
-from utils import getFullDomain
-from utils import removeIdEnding
-from utils import removePostFromCache
-from utils import urlPermitted
-from utils import getNicknameFromActor
-from utils import getDomainFromActor
-from utils import locatePost
-from utils import getCachedPostFilename
-from utils import loadJson
-from utils import saveJson
-from utils import hasObjectDict
-from utils import acctDir
-from utils import localActorUrl
-from posts import getPersonBox
-from session import postJson
+from webfinger import webfinger_handle
+from auth import create_basic_auth_header
+from utils import remove_domain_port
+from utils import has_users_path
+from utils import get_full_domain
+from utils import remove_id_ending
+from utils import remove_post_from_cache
+from utils import url_permitted
+from utils import get_nickname_from_actor
+from utils import get_domain_from_actor
+from utils import locate_post
+from utils import get_cached_post_filename
+from utils import load_json
+from utils import save_json
+from utils import has_object_dict
+from utils import acct_dir
+from utils import local_actor_url
+from utils import has_actor
+from utils import has_object_string_type
+from utils import text_in_file
+from utils import remove_eol
+from posts import get_person_box
+from session import post_json
-def undoBookmarksCollectionEntry(recentPostsCache: {},
- baseDir: str, postFilename: str,
- objectUrl: str,
- actor: str, domain: str, debug: bool) -> None:
+def undo_bookmarks_collection_entry(recent_posts_cache: {},
+ base_dir: str, post_filename: str,
+ actor: str, domain: str,
+ debug: bool) -> None:
"""Undoes a bookmark for a particular actor
"""
- postJsonObject = loadJson(postFilename)
- if not postJsonObject:
+ post_json_object = load_json(post_filename)
+ if not post_json_object:
return
# remove any cached version of this post so that the
# bookmark icon is changed
- nickname = getNicknameFromActor(actor)
- cachedPostFilename = getCachedPostFilename(baseDir, nickname,
- domain, postJsonObject)
- if cachedPostFilename:
- if os.path.isfile(cachedPostFilename):
+ nickname = get_nickname_from_actor(actor)
+ if not nickname:
+ return
+ cached_post_filename = \
+ get_cached_post_filename(base_dir, nickname,
+ domain, post_json_object)
+ if cached_post_filename:
+ if os.path.isfile(cached_post_filename):
try:
- os.remove(cachedPostFilename)
- except BaseException:
- pass
- removePostFromCache(postJsonObject, recentPostsCache)
+ os.remove(cached_post_filename)
+ except OSError:
+ if debug:
+ print('EX: undo_bookmarks_collection_entry ' +
+ 'unable to delete cached post file ' +
+ str(cached_post_filename))
+ remove_post_from_cache(post_json_object, recent_posts_cache)
# remove from the index
- bookmarksIndexFilename = \
- acctDir(baseDir, nickname, domain) + '/bookmarks.index'
- if not os.path.isfile(bookmarksIndexFilename):
+ bookmarks_index_filename = \
+ acct_dir(base_dir, nickname, domain) + '/bookmarks.index'
+ if not os.path.isfile(bookmarks_index_filename):
return
- if '/' in postFilename:
- bookmarkIndex = postFilename.split('/')[-1].strip()
+ if '/' in post_filename:
+ bookmark_index = post_filename.split('/')[-1].strip()
else:
- bookmarkIndex = postFilename.strip()
- bookmarkIndex = bookmarkIndex.replace('\n', '').replace('\r', '')
- if bookmarkIndex not in open(bookmarksIndexFilename).read():
+ bookmark_index = post_filename.strip()
+ bookmark_index = remove_eol(bookmark_index)
+ if not text_in_file(bookmark_index, bookmarks_index_filename):
return
- indexStr = ''
- with open(bookmarksIndexFilename, 'r') as indexFile:
- indexStr = indexFile.read().replace(bookmarkIndex + '\n', '')
- with open(bookmarksIndexFilename, 'w+') as bookmarksIndexFile:
- bookmarksIndexFile.write(indexStr)
-
- if not postJsonObject.get('type'):
+ index_str = ''
+ try:
+ with open(bookmarks_index_filename, 'r',
+ encoding='utf-8') as index_file:
+ index_str = index_file.read().replace(bookmark_index + '\n', '')
+ except OSError:
+ print('EX: unable to read ' + bookmarks_index_filename)
+ if index_str:
+ try:
+ with open(bookmarks_index_filename, 'w+',
+ encoding='utf-8') as bmi_file:
+ bmi_file.write(index_str)
+ except OSError:
+ print('EX: unable to write bookmarks index ' +
+ bookmarks_index_filename)
+ if not post_json_object.get('type'):
return
- if postJsonObject['type'] != 'Create':
+ if post_json_object['type'] != 'Create':
return
- if not hasObjectDict(postJsonObject):
+ if not has_object_dict(post_json_object):
if debug:
print('DEBUG: bookmarked post has no object ' +
- str(postJsonObject))
+ str(post_json_object))
return
- if not postJsonObject['object'].get('bookmarks'):
+ if not post_json_object['object'].get('bookmarks'):
return
- if not isinstance(postJsonObject['object']['bookmarks'], dict):
+ if not isinstance(post_json_object['object']['bookmarks'], dict):
return
- if not postJsonObject['object']['bookmarks'].get('items'):
+ if not post_json_object['object']['bookmarks'].get('items'):
return
- totalItems = 0
- if postJsonObject['object']['bookmarks'].get('totalItems'):
- totalItems = postJsonObject['object']['bookmarks']['totalItems']
- itemFound = False
- for bookmarkItem in postJsonObject['object']['bookmarks']['items']:
- if bookmarkItem.get('actor'):
- if bookmarkItem['actor'] == actor:
+ total_items = 0
+ if post_json_object['object']['bookmarks'].get('totalItems'):
+ total_items = post_json_object['object']['bookmarks']['totalItems']
+ item_found = False
+ for bookmark_item in post_json_object['object']['bookmarks']['items']:
+ if bookmark_item.get('actor'):
+ if bookmark_item['actor'] == actor:
if debug:
print('DEBUG: bookmark was removed for ' + actor)
- bmIt = bookmarkItem
- postJsonObject['object']['bookmarks']['items'].remove(bmIt)
- itemFound = True
+ bm_it = bookmark_item
+ post_json_object['object']['bookmarks']['items'].remove(bm_it)
+ item_found = True
break
- if not itemFound:
+ if not item_found:
return
- if totalItems == 1:
+ if total_items == 1:
if debug:
print('DEBUG: bookmarks was removed from post')
- del postJsonObject['object']['bookmarks']
+ del post_json_object['object']['bookmarks']
else:
- bmItLen = len(postJsonObject['object']['bookmarks']['items'])
- postJsonObject['object']['bookmarks']['totalItems'] = bmItLen
- saveJson(postJsonObject, postFilename)
+ bm_it_len = len(post_json_object['object']['bookmarks']['items'])
+ post_json_object['object']['bookmarks']['totalItems'] = bm_it_len
+ save_json(post_json_object, post_filename)
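The collection being edited above is a small ActivityStreams Collection embedded in the post object. A standalone sketch of its shape and of the removal rule, with made-up URLs: when the last Bookmark item is removed the whole collection is deleted, otherwise totalItems is recomputed.

post = {
    'type': 'Create',
    'object': {
        'bookmarks': {
            'id': 'https://example.net/users/alice/statuses/1/bookmarks',
            'type': 'Collection',
            'totalItems': 1,
            'items': [{'type': 'Bookmark',
                       'actor': 'https://example.net/users/alice'}]
        }
    }
}

actor = 'https://example.net/users/alice'
items = [item for item in post['object']['bookmarks']['items']
         if item.get('actor') != actor]
if not items:
    # same as the total_items == 1 branch above
    del post['object']['bookmarks']
else:
    post['object']['bookmarks']['items'] = items
    post['object']['bookmarks']['totalItems'] = len(items)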
-def bookmarkedByPerson(postJsonObject: {}, nickname: str, domain: str) -> bool:
+def bookmarked_by_person(post_json_object: {},
+ nickname: str, domain: str) -> bool:
"""Returns True if the given post is bookmarked by the given person
"""
- if _noOfBookmarks(postJsonObject) == 0:
+ if _no_of_bookmarks(post_json_object) == 0:
return False
- actorMatch = domain + '/users/' + nickname
- for item in postJsonObject['object']['bookmarks']['items']:
- if item['actor'].endswith(actorMatch):
+ actor_match = domain + '/users/' + nickname
+ for item in post_json_object['object']['bookmarks']['items']:
+ if item['actor'].endswith(actor_match):
return True
return False
-def _noOfBookmarks(postJsonObject: {}) -> int:
+def _no_of_bookmarks(post_json_object: {}) -> int:
"""Returns the number of bookmarks ona given post
"""
- if not hasObjectDict(postJsonObject):
+ if not has_object_dict(post_json_object):
return 0
- if not postJsonObject['object'].get('bookmarks'):
+ if not post_json_object['object'].get('bookmarks'):
return 0
- if not isinstance(postJsonObject['object']['bookmarks'], dict):
+ if not isinstance(post_json_object['object']['bookmarks'], dict):
return 0
- if not postJsonObject['object']['bookmarks'].get('items'):
- postJsonObject['object']['bookmarks']['items'] = []
- postJsonObject['object']['bookmarks']['totalItems'] = 0
- return len(postJsonObject['object']['bookmarks']['items'])
+ if not post_json_object['object']['bookmarks'].get('items'):
+ post_json_object['object']['bookmarks']['items'] = []
+ post_json_object['object']['bookmarks']['totalItems'] = 0
+ return len(post_json_object['object']['bookmarks']['items'])
-def updateBookmarksCollection(recentPostsCache: {},
- baseDir: str, postFilename: str,
- objectUrl: str,
- actor: str, domain: str, debug: bool) -> None:
+def update_bookmarks_collection(recent_posts_cache: {},
+ base_dir: str, post_filename: str,
+ object_url: str,
+ actor: str, domain: str, debug: bool) -> None:
"""Updates the bookmarks collection within a post
"""
- postJsonObject = loadJson(postFilename)
- if postJsonObject:
- # remove any cached version of this post so that the
- # bookmark icon is changed
- nickname = getNicknameFromActor(actor)
- cachedPostFilename = getCachedPostFilename(baseDir, nickname,
- domain, postJsonObject)
- if cachedPostFilename:
- if os.path.isfile(cachedPostFilename):
- try:
- os.remove(cachedPostFilename)
- except BaseException:
- pass
- removePostFromCache(postJsonObject, recentPostsCache)
+ post_json_object = load_json(post_filename)
+ if not post_json_object:
+ return
- if not postJsonObject.get('object'):
- if debug:
- print('DEBUG: no object in bookmarked post ' +
- str(postJsonObject))
- return
- if not objectUrl.endswith('/bookmarks'):
- objectUrl = objectUrl + '/bookmarks'
- # does this post have bookmarks on it from differenent actors?
- if not postJsonObject['object'].get('bookmarks'):
- if debug:
- print('DEBUG: Adding initial bookmarks to ' + objectUrl)
- bookmarksJson = {
- "@context": "https://www.w3.org/ns/activitystreams",
- 'id': objectUrl,
- 'type': 'Collection',
- "totalItems": 1,
- 'items': [{
- 'type': 'Bookmark',
- 'actor': actor
- }]
- }
- postJsonObject['object']['bookmarks'] = bookmarksJson
- else:
- if not postJsonObject['object']['bookmarks'].get('items'):
- postJsonObject['object']['bookmarks']['items'] = []
- for bookmarkItem in postJsonObject['object']['bookmarks']['items']:
- if bookmarkItem.get('actor'):
- if bookmarkItem['actor'] == actor:
- return
- newBookmark = {
+ # remove any cached version of this post so that the
+ # bookmark icon is changed
+ nickname = get_nickname_from_actor(actor)
+ if not nickname:
+ return
+ cached_post_filename = \
+ get_cached_post_filename(base_dir, nickname,
+ domain, post_json_object)
+ if cached_post_filename:
+ if os.path.isfile(cached_post_filename):
+ try:
+ os.remove(cached_post_filename)
+ except OSError:
+ if debug:
+ print('EX: update_bookmarks_collection ' +
+ 'unable to delete cached post ' +
+ str(cached_post_filename))
+ remove_post_from_cache(post_json_object, recent_posts_cache)
+
+ if not post_json_object.get('object'):
+ if debug:
+ print('DEBUG: no object in bookmarked post ' +
+ str(post_json_object))
+ return
+ if not object_url.endswith('/bookmarks'):
+ object_url = object_url + '/bookmarks'
+ # does this post have bookmarks on it from different actors?
+ if not post_json_object['object'].get('bookmarks'):
+ if debug:
+ print('DEBUG: Adding initial bookmarks to ' + object_url)
+ bookmarks_json = {
+ "@context": "https://www.w3.org/ns/activitystreams",
+ 'id': object_url,
+ 'type': 'Collection',
+ "totalItems": 1,
+ 'items': [{
'type': 'Bookmark',
'actor': actor
- }
- nb = newBookmark
- bmIt = len(postJsonObject['object']['bookmarks']['items'])
- postJsonObject['object']['bookmarks']['items'].append(nb)
- postJsonObject['object']['bookmarks']['totalItems'] = bmIt
+ }]
+ }
+ post_json_object['object']['bookmarks'] = bookmarks_json
+ else:
+ if not post_json_object['object']['bookmarks'].get('items'):
+ post_json_object['object']['bookmarks']['items'] = []
+ bm_items = post_json_object['object']['bookmarks']['items']
+ for bookmark_item in bm_items:
+ if bookmark_item.get('actor'):
+ if bookmark_item['actor'] == actor:
+ return
+ new_bookmark = {
+ 'type': 'Bookmark',
+ 'actor': actor
+ }
+ nbook = new_bookmark
+ bm_it = len(post_json_object['object']['bookmarks']['items'])
+ post_json_object['object']['bookmarks']['items'].append(nbook)
+ post_json_object['object']['bookmarks']['totalItems'] = bm_it
- if debug:
- print('DEBUG: saving post with bookmarks added')
- pprint(postJsonObject)
+ if debug:
+ print('DEBUG: saving post with bookmarks added')
+ pprint(post_json_object)
- saveJson(postJsonObject, postFilename)
+ save_json(post_json_object, post_filename)
- # prepend to the index
- bookmarksIndexFilename = \
- acctDir(baseDir, nickname, domain) + '/bookmarks.index'
- bookmarkIndex = postFilename.split('/')[-1]
- if os.path.isfile(bookmarksIndexFilename):
- if bookmarkIndex not in open(bookmarksIndexFilename).read():
- try:
- with open(bookmarksIndexFilename, 'r+') as bmIndexFile:
- content = bmIndexFile.read()
- if bookmarkIndex + '\n' not in content:
- bmIndexFile.seek(0, 0)
- bmIndexFile.write(bookmarkIndex + '\n' + content)
- if debug:
- print('DEBUG: bookmark added to index')
- except Exception as e:
- print('WARN: Failed to write entry to bookmarks index ' +
- bookmarksIndexFilename + ' ' + str(e))
- else:
- with open(bookmarksIndexFilename, 'w+') as bookmarksIndexFile:
- bookmarksIndexFile.write(bookmarkIndex + '\n')
+ # prepend to the index
+ bookmarks_index_filename = \
+ acct_dir(base_dir, nickname, domain) + '/bookmarks.index'
+ bookmark_index = post_filename.split('/')[-1]
+ if os.path.isfile(bookmarks_index_filename):
+ if not text_in_file(bookmark_index, bookmarks_index_filename):
+ try:
+ with open(bookmarks_index_filename, 'r+',
+ encoding='utf-8') as bmi_file:
+ content = bmi_file.read()
+ if bookmark_index + '\n' not in content:
+ bmi_file.seek(0, 0)
+ bmi_file.write(bookmark_index + '\n' + content)
+ if debug:
+ print('DEBUG: bookmark added to index')
+ except OSError as ex:
+ print('WARN: Failed to write entry to bookmarks index ' +
+ bookmarks_index_filename + ' ' + str(ex))
+ else:
+ try:
+ with open(bookmarks_index_filename, 'w+',
+ encoding='utf-8') as bm_file:
+ bm_file.write(bookmark_index + '\n')
+ except OSError:
+ print('EX: unable to write bookmarks index ' +
+ bookmarks_index_filename)
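Both writers above keep bookmarks.index in reverse-chronological order by prepending the newest entry. A minimal sketch of the same read-then-seek(0) prepend pattern, with a hypothetical filename:

index_filename = '/tmp/bookmarks.index'   # hypothetical path
new_entry = '20210804123456.json'

with open(index_filename, 'a+', encoding='utf-8'):
    pass   # make sure the file exists before opening it read/write
with open(index_filename, 'r+', encoding='utf-8') as idx_file:
    content = idx_file.read()
    if new_entry + '\n' not in content:
        idx_file.seek(0, 0)
        idx_file.write(new_entry + '\n' + content)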
-def bookmark(recentPostsCache: {},
- session, baseDir: str, federationList: [],
- nickname: str, domain: str, port: int,
- ccList: [], httpPrefix: str,
- objectUrl: str, actorBookmarked: str,
- clientToServer: bool,
- sendThreads: [], postLog: [],
- personCache: {}, cachedWebfingers: {},
- debug: bool, projectVersion: str) -> {}:
+def bookmark_post(recent_posts_cache: {},
+ base_dir: str, federation_list: [],
+ nickname: str, domain: str, port: int,
+ cc_list: [], http_prefix: str,
+ object_url: str, actor_bookmarked: str,
+ debug: bool) -> {}:
"""Creates a bookmark
actor is the person doing the bookmarking
'to' might be a specific person (actor) whose post was bookmarked
object is typically the url of the message which was bookmarked
"""
- if not urlPermitted(objectUrl, federationList):
+ if not url_permitted(object_url, federation_list):
return None
- fullDomain = getFullDomain(domain, port)
+ full_domain = get_full_domain(domain, port)
- newBookmarkJson = {
+ new_bookmark_json = {
"@context": "https://www.w3.org/ns/activitystreams",
'type': 'Bookmark',
- 'actor': localActorUrl(httpPrefix, nickname, fullDomain),
- 'object': objectUrl
+ 'actor': local_actor_url(http_prefix, nickname, full_domain),
+ 'object': object_url
}
- if ccList:
- if len(ccList) > 0:
- newBookmarkJson['cc'] = ccList
+ if cc_list:
+ if len(cc_list) > 0:
+ new_bookmark_json['cc'] = cc_list
# Extract the domain and nickname from a statuses link
- bookmarkedPostNickname = None
- bookmarkedPostDomain = None
- bookmarkedPostPort = None
- if actorBookmarked:
- acBm = actorBookmarked
- bookmarkedPostNickname = getNicknameFromActor(acBm)
- bookmarkedPostDomain, bookmarkedPostPort = getDomainFromActor(acBm)
+ bookmarked_post_nickname = None
+ if actor_bookmarked:
+ ac_bm = actor_bookmarked
+ bookmarked_post_nickname = get_nickname_from_actor(ac_bm)
+ _, _ = get_domain_from_actor(ac_bm)
else:
- if hasUsersPath(objectUrl):
- ou = objectUrl
- bookmarkedPostNickname = getNicknameFromActor(ou)
- bookmarkedPostDomain, bookmarkedPostPort = getDomainFromActor(ou)
+ if has_users_path(object_url):
+ ourl = object_url
+ bookmarked_post_nickname = get_nickname_from_actor(ourl)
+ _, _ = get_domain_from_actor(ourl)
- if bookmarkedPostNickname:
- postFilename = locatePost(baseDir, nickname, domain, objectUrl)
- if not postFilename:
- print('DEBUG: bookmark baseDir: ' + baseDir)
+ if bookmarked_post_nickname:
+ post_filename = locate_post(base_dir, nickname, domain, object_url)
+ if not post_filename:
+ print('DEBUG: bookmark base_dir: ' + base_dir)
print('DEBUG: bookmark nickname: ' + nickname)
print('DEBUG: bookmark domain: ' + domain)
- print('DEBUG: bookmark objectUrl: ' + objectUrl)
+ print('DEBUG: bookmark object_url: ' + object_url)
return None
- updateBookmarksCollection(recentPostsCache,
- baseDir, postFilename, objectUrl,
- newBookmarkJson['actor'], domain, debug)
+ update_bookmarks_collection(recent_posts_cache,
+ base_dir, post_filename, object_url,
+ new_bookmark_json['actor'], domain, debug)
- return newBookmarkJson
+ return new_bookmark_json
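For reference, a successful bookmark_post call returns an activity of this shape; every URL below is illustrative, and 'cc' appears only when a cc_list was supplied:

bookmark_activity = {
    '@context': 'https://www.w3.org/ns/activitystreams',
    'type': 'Bookmark',
    'actor': 'https://example.net/users/alice',
    'object': 'https://example.net/users/bob/statuses/20210804123456',
    'cc': ['https://example.net/users/bob']
}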
-def undoBookmark(recentPostsCache: {},
- session, baseDir: str, federationList: [],
- nickname: str, domain: str, port: int,
- ccList: [], httpPrefix: str,
- objectUrl: str, actorBookmarked: str,
- clientToServer: bool,
- sendThreads: [], postLog: [],
- personCache: {}, cachedWebfingers: {},
- debug: bool, projectVersion: str) -> {}:
+def undo_bookmark_post(recent_posts_cache: {},
+ base_dir: str, federation_list: [],
+ nickname: str, domain: str, port: int,
+ cc_list: [], http_prefix: str,
+ object_url: str, actor_bookmarked: str,
+ debug: bool) -> {}:
"""Removes a bookmark
actor is the person doing the bookmarking
'to' might be a specific person (actor) whose post was bookmarked
object is typically the url of the message which was bookmarked
"""
- if not urlPermitted(objectUrl, federationList):
+ if not url_permitted(object_url, federation_list):
return None
- fullDomain = getFullDomain(domain, port)
+ full_domain = get_full_domain(domain, port)
- newUndoBookmarkJson = {
+ new_undo_bookmark_json = {
"@context": "https://www.w3.org/ns/activitystreams",
'type': 'Undo',
- 'actor': localActorUrl(httpPrefix, nickname, fullDomain),
+ 'actor': local_actor_url(http_prefix, nickname, full_domain),
'object': {
'type': 'Bookmark',
- 'actor': localActorUrl(httpPrefix, nickname, fullDomain),
- 'object': objectUrl
+ 'actor': local_actor_url(http_prefix, nickname, full_domain),
+ 'object': object_url
}
}
- if ccList:
- if len(ccList) > 0:
- newUndoBookmarkJson['cc'] = ccList
- newUndoBookmarkJson['object']['cc'] = ccList
+ if cc_list:
+ if len(cc_list) > 0:
+ new_undo_bookmark_json['cc'] = cc_list
+ new_undo_bookmark_json['object']['cc'] = cc_list
# Extract the domain and nickname from a statuses link
- bookmarkedPostNickname = None
- bookmarkedPostDomain = None
- bookmarkedPostPort = None
- if actorBookmarked:
- acBm = actorBookmarked
- bookmarkedPostNickname = getNicknameFromActor(acBm)
- bookmarkedPostDomain, bookmarkedPostPort = getDomainFromActor(acBm)
+ bookmarked_post_nickname = None
+ if actor_bookmarked:
+ ac_bm = actor_bookmarked
+ bookmarked_post_nickname = get_nickname_from_actor(ac_bm)
+ _, _ = get_domain_from_actor(ac_bm)
else:
- if hasUsersPath(objectUrl):
- ou = objectUrl
- bookmarkedPostNickname = getNicknameFromActor(ou)
- bookmarkedPostDomain, bookmarkedPostPort = getDomainFromActor(ou)
+ if has_users_path(object_url):
+ ourl = object_url
+ bookmarked_post_nickname = get_nickname_from_actor(ourl)
+ _, _ = get_domain_from_actor(ourl)
- if bookmarkedPostNickname:
- postFilename = locatePost(baseDir, nickname, domain, objectUrl)
- if not postFilename:
+ if bookmarked_post_nickname:
+ post_filename = locate_post(base_dir, nickname, domain, object_url)
+ if not post_filename:
return None
- undoBookmarksCollectionEntry(recentPostsCache,
- baseDir, postFilename, objectUrl,
- newUndoBookmarkJson['actor'],
- domain, debug)
+ undo_bookmarks_collection_entry(recent_posts_cache,
+ base_dir, post_filename,
+ new_undo_bookmark_json['actor'],
+ domain, debug)
else:
return None
- return newUndoBookmarkJson
+ return new_undo_bookmark_json
-def sendBookmarkViaServer(baseDir: str, session,
- nickname: str, password: str,
- domain: str, fromPort: int,
- httpPrefix: str, bookmarkUrl: str,
- cachedWebfingers: {}, personCache: {},
- debug: bool, projectVersion: str,
- signingPrivateKeyPem: str) -> {}:
+def send_bookmark_via_server(base_dir: str, session,
+ nickname: str, password: str,
+ domain: str, from_port: int,
+ http_prefix: str, bookmark_url: str,
+ cached_webfingers: {}, person_cache: {},
+ debug: bool, project_version: str,
+ signing_priv_key_pem: str) -> {}:
"""Creates a bookmark via c2s
"""
if not session:
- print('WARN: No session for sendBookmarkViaServer')
+ print('WARN: No session for send_bookmark_via_server')
return 6
- domainFull = getFullDomain(domain, fromPort)
+ domain_full = get_full_domain(domain, from_port)
- actor = localActorUrl(httpPrefix, nickname, domainFull)
+ actor = local_actor_url(http_prefix, nickname, domain_full)
- newBookmarkJson = {
+ new_bookmark_json = {
"@context": "https://www.w3.org/ns/activitystreams",
"type": "Add",
"actor": actor,
"to": [actor],
"object": {
"type": "Document",
- "url": bookmarkUrl,
+ "url": bookmark_url,
"to": [actor]
},
"target": actor + "/tlbookmarks"
}
- handle = httpPrefix + '://' + domainFull + '/@' + nickname
+ handle = http_prefix + '://' + domain_full + '/@' + nickname
# lookup the inbox for the To handle
- wfRequest = webfingerHandle(session, handle, httpPrefix,
- cachedWebfingers,
- domain, projectVersion, debug, False,
- signingPrivateKeyPem)
- if not wfRequest:
+ wf_request = \
+ webfinger_handle(session, handle, http_prefix,
+ cached_webfingers,
+ domain, project_version, debug, False,
+ signing_priv_key_pem)
+ if not wf_request:
if debug:
print('DEBUG: bookmark webfinger failed for ' + handle)
return 1
- if not isinstance(wfRequest, dict):
+ if not isinstance(wf_request, dict):
print('WARN: bookmark webfinger for ' + handle +
- ' did not return a dict. ' + str(wfRequest))
+ ' did not return a dict. ' + str(wf_request))
return 1
- postToBox = 'outbox'
+ post_to_box = 'outbox'
# get the actor inbox for the To handle
- originDomain = domain
- (inboxUrl, pubKeyId, pubKey, fromPersonId, sharedInbox, avatarUrl,
- displayName, _) = getPersonBox(signingPrivateKeyPem,
- originDomain,
- baseDir, session, wfRequest,
- personCache,
- projectVersion, httpPrefix,
- nickname, domain,
- postToBox, 58391)
+ origin_domain = domain
+ (inbox_url, _, _, from_person_id, _, _,
+ _, _) = get_person_box(signing_priv_key_pem,
+ origin_domain,
+ base_dir, session, wf_request,
+ person_cache,
+ project_version, http_prefix,
+ nickname, domain,
+ post_to_box, 58391)
- if not inboxUrl:
+ if not inbox_url:
if debug:
- print('DEBUG: bookmark no ' + postToBox +
+ print('DEBUG: bookmark no ' + post_to_box +
' was found for ' + handle)
return 3
- if not fromPersonId:
+ if not from_person_id:
if debug:
print('DEBUG: bookmark no actor was found for ' + handle)
return 4
- authHeader = createBasicAuthHeader(nickname, password)
+ auth_header = create_basic_auth_header(nickname, password)
headers = {
'host': domain,
'Content-type': 'application/json',
- 'Authorization': authHeader
+ 'Authorization': auth_header
}
- postResult = postJson(httpPrefix, domainFull,
- session, newBookmarkJson, [], inboxUrl,
- headers, 3, True)
- if not postResult:
+ post_result = post_json(http_prefix, domain_full,
+ session, new_bookmark_json, [], inbox_url,
+ headers, 3, True)
+ if not post_result:
if debug:
- print('WARN: POST bookmark failed for c2s to ' + inboxUrl)
+ print('WARN: POST bookmark failed for c2s to ' + inbox_url)
return 5
if debug:
print('DEBUG: c2s POST bookmark success')
- return newBookmarkJson
+ return new_bookmark_json
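On success send_bookmark_via_server returns the Add activity (a dict); on failure it returns a small integer: 1 for a webfinger failure, 3 when no outbox was found, 4 when no actor was found, 5 when the POST failed and 6 when there is no session. Callers can therefore distinguish outcomes by type, as in this illustrative helper:

def bookmark_send_succeeded(result) -> bool:
    """True when send_bookmark_via_server returned the Add activity
    rather than an integer error code (illustrative helper)"""
    return isinstance(result, dict)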
-def sendUndoBookmarkViaServer(baseDir: str, session,
- nickname: str, password: str,
- domain: str, fromPort: int,
- httpPrefix: str, bookmarkUrl: str,
- cachedWebfingers: {}, personCache: {},
- debug: bool, projectVersion: str,
- signingPrivateKeyPem: str) -> {}:
+def send_undo_bookmark_via_server(base_dir: str, session,
+ nickname: str, password: str,
+ domain: str, from_port: int,
+ http_prefix: str, bookmark_url: str,
+ cached_webfingers: {}, person_cache: {},
+ debug: bool, project_version: str,
+ signing_priv_key_pem: str) -> {}:
"""Removes a bookmark via c2s
"""
if not session:
- print('WARN: No session for sendUndoBookmarkViaServer')
+ print('WARN: No session for send_undo_bookmark_via_server')
return 6
- domainFull = getFullDomain(domain, fromPort)
+ domain_full = get_full_domain(domain, from_port)
- actor = localActorUrl(httpPrefix, nickname, domainFull)
+ actor = local_actor_url(http_prefix, nickname, domain_full)
- newBookmarkJson = {
+ new_bookmark_json = {
"@context": "https://www.w3.org/ns/activitystreams",
"type": "Remove",
"actor": actor,
"to": [actor],
"object": {
"type": "Document",
- "url": bookmarkUrl,
+ "url": bookmark_url,
"to": [actor]
},
"target": actor + "/tlbookmarks"
}
- handle = httpPrefix + '://' + domainFull + '/@' + nickname
+ handle = http_prefix + '://' + domain_full + '/@' + nickname
# lookup the inbox for the To handle
- wfRequest = webfingerHandle(session, handle, httpPrefix,
- cachedWebfingers,
- domain, projectVersion, debug, False,
- signingPrivateKeyPem)
- if not wfRequest:
+ wf_request = \
+ webfinger_handle(session, handle, http_prefix,
+ cached_webfingers,
+ domain, project_version, debug, False,
+ signing_priv_key_pem)
+ if not wf_request:
if debug:
print('DEBUG: unbookmark webfinger failed for ' + handle)
return 1
- if not isinstance(wfRequest, dict):
+ if not isinstance(wf_request, dict):
print('WARN: unbookmark webfinger for ' + handle +
- ' did not return a dict. ' + str(wfRequest))
+ ' did not return a dict. ' + str(wf_request))
return 1
- postToBox = 'outbox'
+ post_to_box = 'outbox'
# get the actor inbox for the To handle
- originDomain = domain
- (inboxUrl, pubKeyId, pubKey, fromPersonId, sharedInbox, avatarUrl,
- displayName, _) = getPersonBox(signingPrivateKeyPem,
- originDomain,
- baseDir, session, wfRequest,
- personCache,
- projectVersion, httpPrefix,
- nickname, domain,
- postToBox, 52594)
+ origin_domain = domain
+ (inbox_url, _, _, from_person_id, _, _,
+ _, _) = get_person_box(signing_priv_key_pem,
+ origin_domain,
+ base_dir, session, wf_request,
+ person_cache,
+ project_version, http_prefix,
+ nickname, domain,
+ post_to_box, 52594)
- if not inboxUrl:
+ if not inbox_url:
if debug:
- print('DEBUG: unbookmark no ' + postToBox +
+ print('DEBUG: unbookmark no ' + post_to_box +
' was found for ' + handle)
return 3
- if not fromPersonId:
+ if not from_person_id:
if debug:
print('DEBUG: unbookmark no actor was found for ' + handle)
return 4
- authHeader = createBasicAuthHeader(nickname, password)
+ auth_header = create_basic_auth_header(nickname, password)
headers = {
'host': domain,
'Content-type': 'application/json',
- 'Authorization': authHeader
+ 'Authorization': auth_header
}
- postResult = postJson(httpPrefix, domainFull,
- session, newBookmarkJson, [], inboxUrl,
- headers, 3, True)
- if not postResult:
+ post_result = post_json(http_prefix, domain_full,
+ session, new_bookmark_json, [], inbox_url,
+ headers, 3, True)
+ if not post_result:
if debug:
- print('WARN: POST unbookmark failed for c2s to ' + inboxUrl)
+ print('WARN: POST unbookmark failed for c2s to ' + inbox_url)
return 5
if debug:
print('DEBUG: c2s POST unbookmark success')
- return newBookmarkJson
+ return new_bookmark_json
-def outboxBookmark(recentPostsCache: {},
- baseDir: str, httpPrefix: str,
- nickname: str, domain: str, port: int,
- messageJson: {}, debug: bool) -> None:
+def outbox_bookmark(recent_posts_cache: {},
+ base_dir: str, http_prefix: str,
+ nickname: str, domain: str, port: int,
+ message_json: {}, debug: bool) -> None:
""" When a bookmark request is received by the outbox from c2s
"""
- if not messageJson.get('type'):
+ if not message_json.get('type'):
return
- if messageJson['type'] != 'Add':
+ if message_json['type'] != 'Add':
return
- if not messageJson.get('actor'):
- if debug:
- print('DEBUG: no actor in bookmark Add')
+ if not has_actor(message_json, debug):
return
- if not hasObjectDict(messageJson):
- if debug:
- print('DEBUG: no object in bookmark Add')
- return
- if not messageJson.get('target'):
+ if not message_json.get('target'):
if debug:
print('DEBUG: no target in bookmark Add')
return
- if not messageJson['object'].get('type'):
- if debug:
- print('DEBUG: no object type in bookmark Add')
+ if not has_object_string_type(message_json, debug):
return
- if not isinstance(messageJson['target'], str):
+ if not isinstance(message_json['target'], str):
if debug:
print('DEBUG: bookmark Add target is not string')
return
- domainFull = getFullDomain(domain, port)
- if not messageJson['target'].endswith('://' + domainFull +
- '/users/' + nickname +
- '/tlbookmarks'):
+ domain_full = get_full_domain(domain, port)
+ expected_target = \
+ http_prefix + '://' + domain_full + \
+ '/users/' + nickname + '/tlbookmarks'
+ if message_json['target'] != expected_target:
if debug:
print('DEBUG: bookmark Add target invalid ' +
- messageJson['target'])
+ message_json['target'])
return
- if messageJson['object']['type'] != 'Document':
+ if message_json['object']['type'] != 'Document':
if debug:
print('DEBUG: bookmark Add type is not Document')
return
- if not messageJson['object'].get('url'):
+ if not message_json['object'].get('url'):
if debug:
print('DEBUG: bookmark Add missing url')
return
if debug:
print('DEBUG: c2s bookmark Add request arrived in outbox')
- messageUrl = removeIdEnding(messageJson['object']['url'])
- domain = removeDomainPort(domain)
- postFilename = locatePost(baseDir, nickname, domain, messageUrl)
- if not postFilename:
+ message_url = remove_id_ending(message_json['object']['url'])
+ domain = remove_domain_port(domain)
+ post_filename = locate_post(base_dir, nickname, domain, message_url)
+ if not post_filename:
if debug:
print('DEBUG: c2s bookmark post not found in inbox or outbox')
- print(messageUrl)
+ print(message_url)
return
- updateBookmarksCollection(recentPostsCache,
- baseDir, postFilename, messageUrl,
- messageJson['actor'], domain, debug)
+ update_bookmarks_collection(recent_posts_cache,
+ base_dir, post_filename, message_url,
+ message_json['actor'], domain, debug)
if debug:
- print('DEBUG: post bookmarked via c2s - ' + postFilename)
+ print('DEBUG: post bookmarked via c2s - ' + post_filename)
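Note the hardening in the target check above: the old code used endswith(), the new code demands exact equality with expected_target. A sketch of the kind of crafted target that endswith() would have accepted (evil.example is a made-up domain):

domain_full = 'example.net'
nickname = 'alice'
expected_target = \
    'https://' + domain_full + '/users/' + nickname + '/tlbookmarks'
crafted = 'https://evil.example/redirect?u=' + \
    '://' + domain_full + '/users/' + nickname + '/tlbookmarks'
assert crafted.endswith('://' + domain_full +
                        '/users/' + nickname + '/tlbookmarks')
assert crafted != expected_target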
-def outboxUndoBookmark(recentPostsCache: {},
- baseDir: str, httpPrefix: str,
- nickname: str, domain: str, port: int,
- messageJson: {}, debug: bool) -> None:
+def outbox_undo_bookmark(recent_posts_cache: {},
+ base_dir: str, http_prefix: str,
+ nickname: str, domain: str, port: int,
+ message_json: {}, debug: bool) -> None:
""" When an undo bookmark request is received by the outbox from c2s
"""
- if not messageJson.get('type'):
+ if not message_json.get('type'):
return
- if messageJson['type'] != 'Remove':
+ if message_json['type'] != 'Remove':
return
- if not messageJson.get('actor'):
- if debug:
- print('DEBUG: no actor in unbookmark Remove')
+ if not has_actor(message_json, debug):
return
- if not hasObjectDict(messageJson):
- if debug:
- print('DEBUG: no object in unbookmark Remove')
- return
- if not messageJson.get('target'):
+ if not message_json.get('target'):
if debug:
print('DEBUG: no target in unbookmark Remove')
return
- if not messageJson['object'].get('type'):
- if debug:
- print('DEBUG: no object type in bookmark Remove')
+ if not has_object_string_type(message_json, debug):
return
- if not isinstance(messageJson['target'], str):
+ if not isinstance(message_json['target'], str):
if debug:
print('DEBUG: unbookmark Remove target is not string')
return
- domainFull = getFullDomain(domain, port)
- if not messageJson['target'].endswith('://' + domainFull +
- '/users/' + nickname +
- '/tlbookmarks'):
+ domain_full = get_full_domain(domain, port)
+ expected_target = \
+ http_prefix + '://' + domain_full + \
+ '/users/' + nickname + '/tlbookmarks'
+ if message_json['target'] != expected_target:
if debug:
print('DEBUG: unbookmark Remove target invalid ' +
- messageJson['target'])
+ message_json['target'])
return
- if messageJson['object']['type'] != 'Document':
+ if message_json['object']['type'] != 'Document':
if debug:
print('DEBUG: unbookmark Remove type is not Document')
return
- if not messageJson['object'].get('url'):
+ if not message_json['object'].get('url'):
if debug:
print('DEBUG: unbookmark Remove missing url')
return
if debug:
print('DEBUG: c2s unbookmark Remove request arrived in outbox')
- messageUrl = removeIdEnding(messageJson['object']['url'])
- domain = removeDomainPort(domain)
- postFilename = locatePost(baseDir, nickname, domain, messageUrl)
- if not postFilename:
+ message_url = remove_id_ending(message_json['object']['url'])
+ domain = remove_domain_port(domain)
+ post_filename = locate_post(base_dir, nickname, domain, message_url)
+ if not post_filename:
if debug:
print('DEBUG: c2s unbookmark post not found in inbox or outbox')
- print(messageUrl)
+ print(message_url)
return
- updateBookmarksCollection(recentPostsCache,
- baseDir, postFilename, messageUrl,
- messageJson['actor'], domain, debug)
+ update_bookmarks_collection(recent_posts_cache,
+ base_dir, post_filename, message_url,
+ message_json['actor'], domain, debug)
if debug:
- print('DEBUG: post unbookmarked via c2s - ' + postFilename)
+ print('DEBUG: post unbookmarked via c2s - ' + post_filename)
diff --git a/briar.py b/briar.py
index 76369208b..811467155 100644
--- a/briar.py
+++ b/briar.py
@@ -1,104 +1,129 @@
__filename__ = "briar.py"
__author__ = "Bob Mottram"
__license__ = "AGPL3+"
-__version__ = "1.2.0"
+__version__ = "1.3.0"
__maintainer__ = "Bob Mottram"
__email__ = "bob@libreserver.org"
__status__ = "Production"
__module_group__ = "Profile Metadata"
-def getBriarAddress(actorJson: {}) -> str:
+from utils import get_attachment_property_value
+
+
+def get_briar_address(actor_json: {}) -> str:
"""Returns briar address for the given actor
"""
- if not actorJson.get('attachment'):
+ if not actor_json.get('attachment'):
return ''
- for propertyValue in actorJson['attachment']:
- if not propertyValue.get('name'):
+ for property_value in actor_json['attachment']:
+ name_value = None
+ if property_value.get('name'):
+ name_value = property_value['name']
+ elif property_value.get('schema:name'):
+ name_value = property_value['schema:name']
+ if not name_value:
continue
- if not propertyValue['name'].lower().startswith('briar'):
+ if not name_value.lower().startswith('briar'):
continue
- if not propertyValue.get('type'):
+ if not property_value.get('type'):
continue
- if not propertyValue.get('value'):
+ prop_value_name, prop_value = \
+ get_attachment_property_value(property_value)
+ if not prop_value:
continue
- if propertyValue['type'] != 'PropertyValue':
+ if not property_value['type'].endswith('PropertyValue'):
continue
- propertyValue['value'] = propertyValue['value'].strip()
- if len(propertyValue['value']) < 50:
+ property_value[prop_value_name] = prop_value.strip()
+ if len(property_value[prop_value_name]) < 50:
continue
- if not propertyValue['value'].startswith('briar://'):
+ if not property_value[prop_value_name].startswith('briar://'):
continue
- if propertyValue['value'].lower() != propertyValue['value']:
+ if property_value[prop_value_name].lower() != \
+ property_value[prop_value_name]:
continue
- if '"' in propertyValue['value']:
+ if '"' in property_value[prop_value_name]:
continue
- if ' ' in propertyValue['value']:
+ if ' ' in property_value[prop_value_name]:
continue
- if ',' in propertyValue['value']:
+ if ',' in property_value[prop_value_name]:
continue
- if '.' in propertyValue['value']:
+ if '.' in property_value[prop_value_name]:
continue
- return propertyValue['value']
+ return property_value[prop_value_name]
return ''
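Taken together, the checks above accept attachment entries like the sketch below. get_attachment_property_value comes from utils and, judging by its use here, returns a (property name, value) pair so that both plain and schema-prefixed keys are handled; the address itself is made up:

attachment_entry = {
    'name': 'Briar',
    'type': 'PropertyValue',
    'value': 'briar://' + 'a' * 44   # lowercase, 52 chars, no dots/spaces
}
assert len(attachment_entry['value']) >= 50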
-def setBriarAddress(actorJson: {}, briarAddress: str) -> None:
+def set_briar_address(actor_json: {}, briar_address: str) -> None:
"""Sets an briar address for the given actor
"""
- notBriarAddress = False
+ not_briar_address = False
- if len(briarAddress) < 50:
- notBriarAddress = True
- if not briarAddress.startswith('briar://'):
- notBriarAddress = True
- if briarAddress.lower() != briarAddress:
- notBriarAddress = True
- if '"' in briarAddress:
- notBriarAddress = True
- if ' ' in briarAddress:
- notBriarAddress = True
- if '.' in briarAddress:
- notBriarAddress = True
- if ',' in briarAddress:
- notBriarAddress = True
- if '<' in briarAddress:
- notBriarAddress = True
+ if len(briar_address) < 50:
+ not_briar_address = True
+ if not briar_address.startswith('briar://'):
+ not_briar_address = True
+ if briar_address.lower() != briar_address:
+ not_briar_address = True
+ if '"' in briar_address:
+ not_briar_address = True
+ if ' ' in briar_address:
+ not_briar_address = True
+ if '.' in briar_address:
+ not_briar_address = True
+ if ',' in briar_address:
+ not_briar_address = True
+ if '<' in briar_address:
+ not_briar_address = True
- if not actorJson.get('attachment'):
- actorJson['attachment'] = []
+ if not actor_json.get('attachment'):
+ actor_json['attachment'] = []
# remove any existing value
- propertyFound = None
- for propertyValue in actorJson['attachment']:
- if not propertyValue.get('name'):
+ property_found = None
+ for property_value in actor_json['attachment']:
+ name_value = None
+ if property_value.get('name'):
+ name_value = property_value['name']
+ elif property_value.get('schema:name'):
+ name_value = property_value['schema:name']
+ if not name_value:
continue
- if not propertyValue.get('type'):
+ if not property_value.get('type'):
continue
- if not propertyValue['name'].lower().startswith('briar'):
+ if not name_value.lower().startswith('briar'):
continue
- propertyFound = propertyValue
+ property_found = property_value
break
- if propertyFound:
- actorJson['attachment'].remove(propertyFound)
- if notBriarAddress:
+ if property_found:
+ actor_json['attachment'].remove(property_found)
+ if not_briar_address:
return
- for propertyValue in actorJson['attachment']:
- if not propertyValue.get('name'):
+ for property_value in actor_json['attachment']:
+ name_value = None
+ if property_value.get('name'):
+ name_value = property_value['name']
+ elif property_value.get('schema:name'):
+ name_value = property_value['schema:name']
+ if not name_value:
continue
- if not propertyValue.get('type'):
+ if not property_value.get('type'):
continue
- if not propertyValue['name'].lower().startswith('briar'):
+ if not name_value.lower().startswith('briar'):
continue
- if propertyValue['type'] != 'PropertyValue':
+ if not property_value['type'].endswith('PropertyValue'):
continue
- propertyValue['value'] = briarAddress
+ prop_value_name, _ = \
+ get_attachment_property_value(property_value)
+ if not prop_value_name:
+ continue
+ property_value[prop_value_name] = briar_address
return
- newBriarAddress = {
+ new_briar_address = {
"name": "Briar",
"type": "PropertyValue",
- "value": briarAddress
+ "value": briar_address
}
- actorJson['attachment'].append(newBriarAddress)
+ actor_json['attachment'].append(new_briar_address)
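A hedged round-trip sketch for the two helpers above, assuming briar.py is importable; both addresses are made up. Passing an invalid address removes any existing entry instead of storing it:

from briar import get_briar_address, set_briar_address

actor_json = {'attachment': []}
address = 'briar://' + 'k' * 44       # 52 chars, passes the length check
set_briar_address(actor_json, address)
assert get_briar_address(actor_json) == address

set_briar_address(actor_json, 'briar://INVALID')  # too short and uppercase
assert get_briar_address(actor_json) == ''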
diff --git a/cache.py b/cache.py
index 7b4654944..0ead2f336 100644
--- a/cache.py
+++ b/cache.py
@@ -1,7 +1,7 @@
__filename__ = "cache.py"
__author__ = "Bob Mottram"
__license__ = "AGPL3+"
-__version__ = "1.2.0"
+__version__ = "1.3.0"
__maintainer__ = "Bob Mottram"
__email__ = "bob@libreserver.org"
__status__ = "Production"
@@ -9,179 +9,194 @@ __module_group__ = "Core"
import os
import datetime
-from session import urlExists
-from session import getJson
-from utils import loadJson
-from utils import saveJson
-from utils import getFileCaseInsensitive
-from utils import getUserPaths
+from session import url_exists
+from session import get_json
+from utils import load_json
+from utils import save_json
+from utils import get_file_case_insensitive
+from utils import get_user_paths
-def _removePersonFromCache(baseDir: str, personUrl: str,
- personCache: {}) -> bool:
+def _remove_person_from_cache(base_dir: str, person_url: str,
+ person_cache: {}) -> bool:
"""Removes an actor from the cache
"""
- cacheFilename = baseDir + '/cache/actors/' + \
- personUrl.replace('/', '#') + '.json'
- if os.path.isfile(cacheFilename):
+ cache_filename = base_dir + '/cache/actors/' + \
+ person_url.replace('/', '#') + '.json'
+ if os.path.isfile(cache_filename):
try:
- os.remove(cacheFilename)
- except BaseException:
- pass
- if personCache.get(personUrl):
- del personCache[personUrl]
+ os.remove(cache_filename)
+ except OSError:
+ print('EX: unable to delete cached actor ' + str(cache_filename))
+ if person_cache.get(person_url):
+ del person_cache[person_url]
-def checkForChangedActor(session, baseDir: str,
- httpPrefix: str, domainFull: str,
- personUrl: str, avatarUrl: str, personCache: {},
- timeoutSec: int):
+def check_for_changed_actor(session, base_dir: str,
+ http_prefix: str, domain_full: str,
+ person_url: str, avatar_url: str, person_cache: {},
+ timeout_sec: int):
"""Checks if the avatar url exists and if not then
the actor has probably changed without receiving an actor/Person Update.
So clear the actor from the cache and it will be refreshed when the next
post from them is sent
"""
- if not session or not avatarUrl:
+ if not session or not avatar_url:
return
- if domainFull in avatarUrl:
+ if domain_full in avatar_url:
return
- if urlExists(session, avatarUrl, timeoutSec, httpPrefix, domainFull):
+ if url_exists(session, avatar_url, timeout_sec, http_prefix, domain_full):
return
- _removePersonFromCache(baseDir, personUrl, personCache)
+ _remove_person_from_cache(base_dir, person_url, person_cache)
-def storePersonInCache(baseDir: str, personUrl: str,
- personJson: {}, personCache: {},
- allowWriteToFile: bool) -> None:
+def store_person_in_cache(base_dir: str, person_url: str,
+ person_json: {}, person_cache: {},
+ allow_write_to_file: bool) -> None:
"""Store an actor in the cache
"""
- if 'statuses' in personUrl or personUrl.endswith('/actor'):
+ if 'statuses' in person_url or person_url.endswith('/actor'):
# This is not an actor or person account
return
- currTime = datetime.datetime.utcnow()
- personCache[personUrl] = {
- "actor": personJson,
- "timestamp": currTime.strftime("%Y-%m-%dT%H:%M:%SZ")
+ curr_time = datetime.datetime.utcnow()
+ person_cache[person_url] = {
+ "actor": person_json,
+ "timestamp": curr_time.strftime("%Y-%m-%dT%H:%M:%SZ")
}
- if not baseDir:
+ if not base_dir:
return
# store to file
- if not allowWriteToFile:
+ if not allow_write_to_file:
return
- if os.path.isdir(baseDir + '/cache/actors'):
- cacheFilename = baseDir + '/cache/actors/' + \
- personUrl.replace('/', '#') + '.json'
- if not os.path.isfile(cacheFilename):
- saveJson(personJson, cacheFilename)
+ if os.path.isdir(base_dir + '/cache/actors'):
+ cache_filename = base_dir + '/cache/actors/' + \
+ person_url.replace('/', '#') + '.json'
+ if not os.path.isfile(cache_filename):
+ save_json(person_json, cache_filename)
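Cache filenames are derived from the actor URL by replacing '/' with '#' and appending '.json'. For example (the base directory is hypothetical):

person_url = 'https://example.net/users/alice'
cache_filename = '/srv/epicyon/cache/actors/' + \
    person_url.replace('/', '#') + '.json'
# -> /srv/epicyon/cache/actors/https:##example.net#users#alice.json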
-def getPersonFromCache(baseDir: str, personUrl: str, personCache: {},
- allowWriteToFile: bool) -> {}:
+def get_person_from_cache(base_dir: str, person_url: str,
+ person_cache: {}) -> {}:
"""Get an actor from the cache
"""
# if the actor is not in memory then try to load it from file
- loadedFromFile = False
- if not personCache.get(personUrl):
+ loaded_from_file = False
+ if not person_cache.get(person_url):
# does the person exist as a cached file?
- cacheFilename = baseDir + '/cache/actors/' + \
- personUrl.replace('/', '#') + '.json'
- actorFilename = getFileCaseInsensitive(cacheFilename)
- if actorFilename:
- personJson = loadJson(actorFilename)
- if personJson:
- storePersonInCache(baseDir, personUrl, personJson,
- personCache, False)
- loadedFromFile = True
+ cache_filename = base_dir + '/cache/actors/' + \
+ person_url.replace('/', '#') + '.json'
+ actor_filename = get_file_case_insensitive(cache_filename)
+ if actor_filename:
+ person_json = load_json(actor_filename)
+ if person_json:
+ store_person_in_cache(base_dir, person_url, person_json,
+ person_cache, False)
+ loaded_from_file = True
- if personCache.get(personUrl):
- if not loadedFromFile:
+ if person_cache.get(person_url):
+ if not loaded_from_file:
# update the timestamp for the last time the actor was retrieved
- currTime = datetime.datetime.utcnow()
- currTimeStr = currTime.strftime("%Y-%m-%dT%H:%M:%SZ")
- personCache[personUrl]['timestamp'] = currTimeStr
- return personCache[personUrl]['actor']
+ curr_time = datetime.datetime.utcnow()
+ curr_time_str = curr_time.strftime("%Y-%m-%dT%H:%M:%SZ")
+ person_cache[person_url]['timestamp'] = curr_time_str
+ return person_cache[person_url]['actor']
return None
-def expirePersonCache(personCache: {}):
+def expire_person_cache(person_cache: {}):
"""Expires old entries from the cache in memory
"""
- currTime = datetime.datetime.utcnow()
+ curr_time = datetime.datetime.utcnow()
removals = []
- for personUrl, cacheJson in personCache.items():
- cacheTime = datetime.datetime.strptime(cacheJson['timestamp'],
- "%Y-%m-%dT%H:%M:%SZ")
- daysSinceCached = (currTime - cacheTime).days
- if daysSinceCached > 2:
- removals.append(personUrl)
+ for person_url, cache_json in person_cache.items():
+ cache_time = datetime.datetime.strptime(cache_json['timestamp'],
+ "%Y-%m-%dT%H:%M:%SZ")
+ days_since_cached = (curr_time - cache_time).days
+ if days_since_cached > 2:
+ removals.append(person_url)
if len(removals) > 0:
- for personUrl in removals:
- del personCache[personUrl]
+ for person_url in removals:
+ del person_cache[person_url]
print(str(len(removals)) + ' actors were expired from the cache')
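A worked example of the two-day expiry rule above, using the same timestamp format as the cache entries:

import datetime

cached = datetime.datetime.strptime('2021-08-01T12:00:00Z',
                                    "%Y-%m-%dT%H:%M:%SZ")
now = datetime.datetime.strptime('2021-08-04T12:00:00Z',
                                 "%Y-%m-%dT%H:%M:%SZ")
assert (now - cached).days == 3   # more than 2, so this entry is removed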
-def storeWebfingerInCache(handle: str, wf, cachedWebfingers: {}) -> None:
+def store_webfinger_in_cache(handle: str, webfing,
+ cached_webfingers: {}) -> None:
"""Store a webfinger endpoint in the cache
"""
- cachedWebfingers[handle] = wf
+ cached_webfingers[handle] = webfing
-def getWebfingerFromCache(handle: str, cachedWebfingers: {}) -> {}:
+def get_webfinger_from_cache(handle: str, cached_webfingers: {}) -> {}:
"""Get webfinger endpoint from the cache
"""
- if cachedWebfingers.get(handle):
- return cachedWebfingers[handle]
+ if cached_webfingers.get(handle):
+ return cached_webfingers[handle]
return None
-def getPersonPubKey(baseDir: str, session, personUrl: str,
- personCache: {}, debug: bool,
- projectVersion: str, httpPrefix: str,
- domain: str, onionDomain: str,
- signingPrivateKeyPem: str) -> str:
- if not personUrl:
+def get_person_pub_key(base_dir: str, session, person_url: str,
+ person_cache: {}, debug: bool,
+ project_version: str, http_prefix: str,
+ domain: str, onion_domain: str,
+ i2p_domain: str,
+ signing_priv_key_pem: str) -> str:
+ """Get the public key for an actor
+ """
+ if not person_url:
return None
- personUrl = personUrl.replace('#main-key', '')
- usersPaths = getUserPaths()
- for possibleUsersPath in usersPaths:
- if personUrl.endswith(possibleUsersPath + 'inbox'):
+ if '#/publicKey' in person_url:
+ person_url = person_url.replace('#/publicKey', '')
+ elif '/main-key' in person_url:
+ person_url = person_url.replace('/main-key', '')
+ else:
+ person_url = person_url.replace('#main-key', '')
+ users_paths = get_user_paths()
+ for possible_users_path in users_paths:
+ if person_url.endswith(possible_users_path + 'inbox'):
if debug:
print('DEBUG: Obtaining public key for shared inbox')
- personUrl = \
- personUrl.replace(possibleUsersPath + 'inbox', '/inbox')
+ person_url = \
+ person_url.replace(possible_users_path + 'inbox', '/inbox')
break
- personJson = \
- getPersonFromCache(baseDir, personUrl, personCache, True)
- if not personJson:
+ person_json = \
+ get_person_from_cache(base_dir, person_url, person_cache)
+ if not person_json:
if debug:
- print('DEBUG: Obtaining public key for ' + personUrl)
- personDomain = domain
- if onionDomain:
- if '.onion/' in personUrl:
- personDomain = onionDomain
- profileStr = 'https://www.w3.org/ns/activitystreams'
- asHeader = {
- 'Accept': 'application/activity+json; profile="' + profileStr + '"'
+ print('DEBUG: Obtaining public key for ' + person_url)
+ person_domain = domain
+ if onion_domain:
+ if '.onion/' in person_url:
+ person_domain = onion_domain
+ elif i2p_domain:
+ if '.i2p/' in person_url:
+ person_domain = i2p_domain
+ profile_str = 'https://www.w3.org/ns/activitystreams'
+ accept_str = \
+ 'application/activity+json; profile="' + profile_str + '"'
+ as_header = {
+ 'Accept': accept_str
}
- personJson = \
- getJson(signingPrivateKeyPem,
- session, personUrl, asHeader, None, debug,
- projectVersion, httpPrefix, personDomain)
- if not personJson:
+ person_json = \
+ get_json(signing_priv_key_pem,
+ session, person_url, as_header, None, debug,
+ project_version, http_prefix, person_domain)
+ if not person_json:
return None
- pubKey = None
- if personJson.get('publicKey'):
- if personJson['publicKey'].get('publicKeyPem'):
- pubKey = personJson['publicKey']['publicKeyPem']
+ pub_key = None
+ if person_json.get('publicKey'):
+ if person_json['publicKey'].get('publicKeyPem'):
+ pub_key = person_json['publicKey']['publicKeyPem']
else:
- if personJson.get('publicKeyPem'):
- pubKey = personJson['publicKeyPem']
+ if person_json.get('publicKeyPem'):
+ pub_key = person_json['publicKeyPem']
- if not pubKey:
+ if not pub_key:
if debug:
- print('DEBUG: Public key not found for ' + personUrl)
+ print('DEBUG: Public key not found for ' + person_url)
- storePersonInCache(baseDir, personUrl, personJson, personCache, True)
- return pubKey
+ store_person_in_cache(base_dir, person_url, person_json,
+ person_cache, True)
+ return pub_key
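get_person_pub_key accepts two actor layouts, a nested publicKey object and a flat publicKeyPem field. A sketch of both, with a truncated placeholder PEM and an illustrative extraction helper mirroring the logic above:

def extract_pub_key(person_json: {}) -> str:
    """Mirror of the key extraction above (illustrative helper)"""
    if person_json.get('publicKey'):
        return person_json['publicKey'].get('publicKeyPem')
    return person_json.get('publicKeyPem')


nested = {'publicKey': {'publicKeyPem': '-----BEGIN PUBLIC KEY-----'}}
flat = {'publicKeyPem': '-----BEGIN PUBLIC KEY-----'}
assert extract_pub_key(nested) == extract_pub_key(flat)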
diff --git a/caddy.example.conf b/caddy.example.conf
index 615501443..3efed5a76 100644
--- a/caddy.example.conf
+++ b/caddy.example.conf
@@ -1,23 +1,28 @@
-# Caddy configuration file for running epicyon on example.com
+# Example configuration file for running Caddy2 in front of Epicyon
-example.com {
- tls {
- # Valid values are rsa2048, rsa4096, rsa8192, p256, and p384.
- # Default is currently p256.
- key_type p384
- }
- header / Strict-Transport-Security "max-age=31556925"
- header / X-Content-Type-Options "nosniff"
- header / X-Download-Options "noopen"
- header / X-Frame-Options "DENY"
- header / X-Permitted-Cross-Domain-Policies "none"
- header / X-Robots-Tag "noindex"
- header / X-XSS-Protection "1; mode=block"
+YOUR_DOMAIN {
+ tls USER@YOUR_DOMAIN
- proxy / http://localhost:7156 {
- transparent
- timeout 10800s
+ header {
+ Strict-Transport-Security "max-age=31556925"
+ Content-Security-Policy "default-src https:; script-src https: 'unsafe-inline'; style-src https: 'unsafe-inline'"
+ X-Content-Type-Options "nosniff"
+ X-Download-Options "noopen"
+ X-Frame-Options "DENY"
+ X-Permitted-Cross-Domain-Policies "none"
+ X-XSS-Protection "1; mode=block"
}
+
+ route /newsmirror/* {
+ root * /var/www/YOUR_DOMAIN
+ file_server
+ }
+
+ route /* {
+ reverse_proxy http://127.0.0.1:7156
+ }
+
+ encode zstd gzip
}
-# eof
+# eof
\ No newline at end of file
diff --git a/categories.py b/categories.py
index f2834e9c2..1ee488e63 100644
--- a/categories.py
+++ b/categories.py
@@ -1,7 +1,7 @@
__filename__ = "categories.py"
__author__ = "Bob Mottram"
__license__ = "AGPL3+"
-__version__ = "1.2.0"
+__version__ = "1.3.0"
__maintainer__ = "Bob Mottram"
__email__ = "bob@libreserver.org"
__status__ = "Production"
@@ -10,118 +10,131 @@ __module_group__ = "RSS Feeds"
import os
import datetime
+MAX_TAG_LENGTH = 42
-def getHashtagCategory(baseDir: str, hashtag: str) -> str:
+INVALID_HASHTAG_CHARS = (',', ' ', '<', ';', '\\', '"', '&', '#')
+
+
+def get_hashtag_category(base_dir: str, hashtag: str) -> str:
"""Returns the category for the hashtag
"""
- categoryFilename = baseDir + '/tags/' + hashtag + '.category'
- if not os.path.isfile(categoryFilename):
- categoryFilename = baseDir + '/tags/' + hashtag.title() + '.category'
- if not os.path.isfile(categoryFilename):
- categoryFilename = \
- baseDir + '/tags/' + hashtag.upper() + '.category'
- if not os.path.isfile(categoryFilename):
+ category_filename = base_dir + '/tags/' + hashtag + '.category'
+ if not os.path.isfile(category_filename):
+ category_filename = base_dir + '/tags/' + hashtag.title() + '.category'
+ if not os.path.isfile(category_filename):
+ category_filename = \
+ base_dir + '/tags/' + hashtag.upper() + '.category'
+ if not os.path.isfile(category_filename):
return ''
- with open(categoryFilename, 'r') as fp:
- categoryStr = fp.read()
- if categoryStr:
- return categoryStr
+ category_str = None
+ try:
+ with open(category_filename, 'r', encoding='utf-8') as category_file:
+ category_str = category_file.read()
+ except OSError:
+ print('EX: unable to read category ' + category_filename)
+ if category_str:
+ return category_str
return ''
-def getHashtagCategories(baseDir: str,
- recent: bool = False, category: str = None) -> None:
+def get_hashtag_categories(base_dir: str,
+ recent: bool = False,
+ category: str = None) -> None:
"""Returns a dictionary containing hashtag categories
"""
- maxTagLength = 42
- hashtagCategories = {}
+ hashtag_categories = {}
if recent:
- currTime = datetime.datetime.utcnow()
- daysSinceEpoch = (currTime - datetime.datetime(1970, 1, 1)).days
- recently = daysSinceEpoch - 1
+ curr_time = datetime.datetime.utcnow()
+ days_since_epoch = (curr_time - datetime.datetime(1970, 1, 1)).days
+ recently = days_since_epoch - 1
- for subdir, dirs, files in os.walk(baseDir + '/tags'):
- for f in files:
- if not f.endswith('.category'):
+ for _, _, files in os.walk(base_dir + '/tags'):
+ for catfile in files:
+ if not catfile.endswith('.category'):
continue
- categoryFilename = os.path.join(baseDir + '/tags', f)
- if not os.path.isfile(categoryFilename):
+ category_filename = os.path.join(base_dir + '/tags', catfile)
+ if not os.path.isfile(category_filename):
continue
- hashtag = f.split('.')[0]
- if len(hashtag) > maxTagLength:
+ hashtag = catfile.split('.')[0]
+ if len(hashtag) > MAX_TAG_LENGTH:
continue
- with open(categoryFilename, 'r') as fp:
- categoryStr = fp.read()
+ with open(category_filename, 'r', encoding='utf-8') as fp_category:
+ category_str = fp_category.read()
- if not categoryStr:
+ if not category_str:
continue
if category:
# only return a dictionary for a specific category
- if categoryStr != category:
+ if category_str != category:
continue
if recent:
- tagsFilename = baseDir + '/tags/' + hashtag + '.txt'
- if not os.path.isfile(tagsFilename):
+ tags_filename = base_dir + '/tags/' + hashtag + '.txt'
+ if not os.path.isfile(tags_filename):
continue
- modTimesinceEpoc = \
- os.path.getmtime(tagsFilename)
- lastModifiedDate = \
- datetime.datetime.fromtimestamp(modTimesinceEpoc)
- fileDaysSinceEpoch = \
- (lastModifiedDate -
+ mod_time_since_epoc = \
+ os.path.getmtime(tags_filename)
+ last_modified_date = \
+ datetime.datetime.fromtimestamp(mod_time_since_epoc)
+ file_days_since_epoch = \
+ (last_modified_date -
datetime.datetime(1970, 1, 1)).days
- if fileDaysSinceEpoch < recently:
+ if file_days_since_epoch < recently:
continue
- if not hashtagCategories.get(categoryStr):
- hashtagCategories[categoryStr] = [hashtag]
+ if not hashtag_categories.get(category_str):
+ hashtag_categories[category_str] = [hashtag]
else:
- if hashtag not in hashtagCategories[categoryStr]:
- hashtagCategories[categoryStr].append(hashtag)
+ if hashtag not in hashtag_categories[category_str]:
+ hashtag_categories[category_str].append(hashtag)
break
- return hashtagCategories
+ return hashtag_categories
-def updateHashtagCategories(baseDir: str) -> None:
+def update_hashtag_categories(base_dir: str) -> None:
"""Regenerates the list of hashtag categories
"""
- categoryListFilename = baseDir + '/accounts/categoryList.txt'
- hashtagCategories = getHashtagCategories(baseDir)
- if not hashtagCategories:
- if os.path.isfile(categoryListFilename):
+ category_list_filename = base_dir + '/accounts/categoryList.txt'
+ hashtag_categories = get_hashtag_categories(base_dir)
+ if not hashtag_categories:
+ if os.path.isfile(category_list_filename):
try:
- os.remove(categoryListFilename)
- except BaseException:
- pass
+ os.remove(category_list_filename)
+ except OSError:
+ print('EX: update_hashtag_categories ' +
+ 'unable to delete cached category list ' +
+ category_list_filename)
return
- categoryList = []
- for categoryStr, hashtagList in hashtagCategories.items():
- categoryList.append(categoryStr)
- categoryList.sort()
+ category_list = []
+ for category_str, _ in hashtag_categories.items():
+ category_list.append(category_str)
+ category_list.sort()
- categoryListStr = ''
- for categoryStr in categoryList:
- categoryListStr += categoryStr + '\n'
+ category_list_str = ''
+ for category_str in category_list:
+ category_list_str += category_str + '\n'
# save a list of available categories for quick lookup
- with open(categoryListFilename, 'w+') as fp:
- fp.write(categoryListStr)
+ try:
+ with open(category_list_filename, 'w+',
+ encoding='utf-8') as fp_category:
+ fp_category.write(category_list_str)
+ except OSError:
+ print('EX: unable to write category ' + category_list_filename)
-def _validHashtagCategory(category: str) -> bool:
+def _valid_hashtag_category(category: str) -> bool:
"""Returns true if the category name is valid
"""
if not category:
return False
- invalidChars = (',', ' ', '<', ';', '\\', '"', '&', '#')
- for ch in invalidChars:
- if ch in category:
+ for char in INVALID_HASHTAG_CHARS:
+ if char in category:
return False
# too long
@@ -131,52 +144,61 @@ def _validHashtagCategory(category: str) -> bool:
return True
-def setHashtagCategory(baseDir: str, hashtag: str, category: str,
- update: bool, force: bool = False) -> bool:
+def set_hashtag_category(base_dir: str, hashtag: str, category: str,
+ update: bool, force: bool = False) -> bool:
"""Sets the category for the hashtag
"""
- if not _validHashtagCategory(category):
+ if not _valid_hashtag_category(category):
return False
if not force:
- hashtagFilename = baseDir + '/tags/' + hashtag + '.txt'
- if not os.path.isfile(hashtagFilename):
+ hashtag_filename = base_dir + '/tags/' + hashtag + '.txt'
+ if not os.path.isfile(hashtag_filename):
hashtag = hashtag.title()
- hashtagFilename = baseDir + '/tags/' + hashtag + '.txt'
- if not os.path.isfile(hashtagFilename):
+ hashtag_filename = base_dir + '/tags/' + hashtag + '.txt'
+ if not os.path.isfile(hashtag_filename):
hashtag = hashtag.upper()
- hashtagFilename = baseDir + '/tags/' + hashtag + '.txt'
- if not os.path.isfile(hashtagFilename):
+ hashtag_filename = base_dir + '/tags/' + hashtag + '.txt'
+ if not os.path.isfile(hashtag_filename):
return False
- if not os.path.isdir(baseDir + '/tags'):
- os.mkdir(baseDir + '/tags')
- categoryFilename = baseDir + '/tags/' + hashtag + '.category'
+ if not os.path.isdir(base_dir + '/tags'):
+ os.mkdir(base_dir + '/tags')
+ category_filename = base_dir + '/tags/' + hashtag + '.category'
if force:
# don't overwrite any existing categories
- if os.path.isfile(categoryFilename):
+ if os.path.isfile(category_filename):
return False
- with open(categoryFilename, 'w+') as fp:
- fp.write(category)
+
+ category_written = False
+ try:
+ with open(category_filename, 'w+', encoding='utf-8') as fp_category:
+ fp_category.write(category)
+ category_written = True
+ except OSError as ex:
+ print('EX: unable to write category ' + category_filename +
+ ' ' + str(ex))
+
+ if category_written:
if update:
- updateHashtagCategories(baseDir)
+ update_hashtag_categories(base_dir)
return True
return False
-def guessHashtagCategory(tagName: str, hashtagCategories: {}) -> str:
+def guess_hashtag_category(tagName: str, hashtag_categories: {}) -> str:
"""Tries to guess a category for the given hashtag.
This works by trying to find the longest similar hashtag
"""
if len(tagName) < 4:
return ''
- categoryMatched = ''
- tagMatchedLen = 0
+ category_matched = ''
+ tag_matched_len = 0
- for categoryStr, hashtagList in hashtagCategories.items():
- for hashtag in hashtagList:
+ for category_str, hashtag_list in hashtag_categories.items():
+ for hashtag in hashtag_list:
if len(hashtag) < 4:
# avoid matching very small strings which often
# lead to spurious categories
@@ -184,13 +206,13 @@ def guessHashtagCategory(tagName: str, hashtagCategories: {}) -> str:
if hashtag not in tagName:
if tagName not in hashtag:
continue
- if not categoryMatched:
- tagMatchedLen = len(hashtag)
- categoryMatched = categoryStr
+ if not category_matched:
+ tag_matched_len = len(hashtag)
+ category_matched = category_str
else:
# match the longest tag
- if len(hashtag) > tagMatchedLen:
- categoryMatched = categoryStr
- if not categoryMatched:
+ if len(hashtag) > tag_matched_len:
+ category_matched = category_str
+ if not category_matched:
return ''
- return categoryMatched
+ return category_matched
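
The renamed functions above form a small API for tagging hashtags with categories. A hedged usage sketch, assuming base_dir points at the instance data directory and that the hashtag already has a tags/<hashtag>.txt file (required when force is False):

    from categories import set_hashtag_category, get_hashtag_category

    base_dir = '/var/www/epicyon'   # hypothetical data directory
    if set_hashtag_category(base_dir, 'permaculture', 'environment',
                            update=True):
        # reads tags/permaculture.category, falling back to the
        # .title() and .upper() variants of the tag
        assert get_hashtag_category(base_dir,
                                    'permaculture') == 'environment'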
diff --git a/city.py b/city.py
index b486c2de0..88867c8cb 100644
--- a/city.py
+++ b/city.py
@@ -1,7 +1,7 @@
__filename__ = "city.py"
__author__ = "Bob Mottram"
__license__ = "AGPL3+"
-__version__ = "1.2.0"
+__version__ = "1.3.0"
__maintainer__ = "Bob Mottram"
__email__ = "bob@libreserver.org"
__status__ = "Production"
@@ -12,7 +12,8 @@ import datetime
import random
import math
from random import randint
-from utils import acctDir
+from utils import acct_dir
+from utils import remove_eol
# states which the simulated city dweller can be in
PERSON_SLEEP = 0
@@ -22,8 +23,10 @@ PERSON_SHOP = 3
PERSON_EVENING = 4
PERSON_PARTY = 5
+BUSY_STATES = (PERSON_WORK, PERSON_SHOP, PERSON_PLAY, PERSON_PARTY)
-def _getDecoyCamera(decoySeed: int) -> (str, str, int):
+
+def _get_decoy_camera(decoy_seed: int) -> (str, str, int):
"""Returns a decoy camera make and model which took the photo
"""
cameras = [
@@ -37,10 +40,16 @@ def _getDecoyCamera(decoySeed: int) -> (str, str, int):
["Apple", "iPhone 12"],
["Apple", "iPhone 12 Mini"],
["Apple", "iPhone 12 Pro Max"],
+ ["Apple", "iPhone 13"],
+ ["Apple", "iPhone 13 Mini"],
+ ["Apple", "iPhone 13 Pro"],
["Samsung", "Galaxy Note 20 Ultra"],
["Samsung", "Galaxy S20 Plus"],
["Samsung", "Galaxy S20 FE 5G"],
["Samsung", "Galaxy Z FOLD 2"],
+ ["Samsung", "Galaxy S12 Plus"],
+ ["Samsung", "Galaxy S12"],
+ ["Samsung", "Galaxy S11 Plus"],
["Samsung", "Galaxy S10 Plus"],
["Samsung", "Galaxy S10e"],
["Samsung", "Galaxy Z Flip"],
@@ -50,8 +59,13 @@ def _getDecoyCamera(decoySeed: int) -> (str, str, int):
["Samsung", "Galaxy S10e"],
["Samsung", "Galaxy S10 5G"],
["Samsung", "Galaxy A60"],
+ ["Samsung", "Note 12"],
+ ["Samsung", "Note 12 Plus"],
+ ["Samsung", "Note 11"],
+ ["Samsung", "Note 11 Plus"],
["Samsung", "Note 10"],
["Samsung", "Note 10 Plus"],
+ ["Samsung", "Galaxy S22 Ultra"],
["Samsung", "Galaxy S21 Ultra"],
["Samsung", "Galaxy Note 20 Ultra"],
["Samsung", "Galaxy S21"],
@@ -60,6 +74,8 @@ def _getDecoyCamera(decoySeed: int) -> (str, str, int):
["Samsung", "Galaxy Z Fold 2"],
["Samsung", "Galaxy A52 5G"],
["Samsung", "Galaxy A71 5G"],
+ ["Google", "Pixel 6 Pro"],
+ ["Google", "Pixel 6"],
["Google", "Pixel 5"],
["Google", "Pixel 4a"],
["Google", "Pixel 4 XL"],
@@ -69,13 +85,13 @@ def _getDecoyCamera(decoySeed: int) -> (str, str, int):
["Google", "Pixel 3"],
["Google", "Pixel 3a"]
]
- randgen = random.Random(decoySeed)
+ randgen = random.Random(decoy_seed)
index = randgen.randint(0, len(cameras) - 1)
- serialNumber = randgen.randint(100000000000, 999999999999999999999999)
- return cameras[index][0], cameras[index][1], serialNumber
+ serial_number = randgen.randint(100000000000, 999999999999999999999999)
+ return cameras[index][0], cameras[index][1], serial_number
-def _getCityPulse(currTimeOfDay, decoySeed: int) -> (float, float):
+def _get_city_pulse(curr_time_of_day, decoy_seed: int) -> (float, float):
"""This simulates expected average patterns of movement in a city.
Jane or Joe average lives and works in the city, commuting in
and out of the central district for work. They have a unique
@@ -84,143 +100,149 @@ def _getCityPulse(currTimeOfDay, decoySeed: int) -> (float, float):
Distance from the city centre is in the range 0.0 - 1.0
Angle is in radians
"""
- randgen = random.Random(decoySeed)
+ randgen = random.Random(decoy_seed)
variance = 3
- busyStates = (PERSON_WORK, PERSON_SHOP, PERSON_PLAY, PERSON_PARTY)
- dataDecoyState = PERSON_SLEEP
- weekday = currTimeOfDay.weekday()
- minHour = 7 + randint(0, variance)
- maxHour = 17 + randint(0, variance)
- if currTimeOfDay.hour > minHour:
- if currTimeOfDay.hour <= maxHour:
+ data_decoy_state = PERSON_SLEEP
+ weekday = curr_time_of_day.weekday()
+ min_hour = 7 + randint(0, variance)
+ max_hour = 17 + randint(0, variance)
+ if curr_time_of_day.hour > min_hour:
+ if curr_time_of_day.hour <= max_hour:
if weekday < 5:
- dataDecoyState = PERSON_WORK
+ data_decoy_state = PERSON_WORK
elif weekday == 5:
- dataDecoyState = PERSON_SHOP
+ data_decoy_state = PERSON_SHOP
else:
- dataDecoyState = PERSON_PLAY
+ data_decoy_state = PERSON_PLAY
else:
if weekday < 5:
- dataDecoyState = PERSON_EVENING
+ data_decoy_state = PERSON_EVENING
else:
- dataDecoyState = PERSON_PARTY
- randgen2 = random.Random(decoySeed + dataDecoyState)
- angleRadians = \
+ data_decoy_state = PERSON_PARTY
+ randgen2 = random.Random(decoy_seed + data_decoy_state)
+ angle_radians = \
(randgen2.randint(0, 100000) / 100000) * 2 * math.pi
# some people are quite random, others have more predictable habits
- decoyRandomness = randgen.randint(1, 3)
+ decoy_randomness = randgen.randint(1, 3)
# occasionally throw in a wildcard to keep the machine learning guessing
- if randint(0, 100) < decoyRandomness:
- distanceFromCityCenter = (randint(0, 100000) / 100000)
- angleRadians = (randint(0, 100000) / 100000) * 2 * math.pi
+ if randint(0, 100) < decoy_randomness:
+ distance_from_city_center = (randint(0, 100000) / 100000)
+ angle_radians = (randint(0, 100000) / 100000) * 2 * math.pi
else:
            # what constitutes the central district is fuzzy
- centralDistrictFuzz = (randgen.randint(0, 100000) / 100000) * 0.1
- busyRadius = 0.3 + centralDistrictFuzz
- if dataDecoyState in busyStates:
+ central_district_fuzz = (randgen.randint(0, 100000) / 100000) * 0.1
+ busy_radius = 0.3 + central_district_fuzz
+ if data_decoy_state in BUSY_STATES:
# if we are busy then we're somewhere in the city center
- distanceFromCityCenter = \
- (randgen.randint(0, 100000) / 100000) * busyRadius
+ distance_from_city_center = \
+ (randgen.randint(0, 100000) / 100000) * busy_radius
else:
# otherwise we're in the burbs
- distanceFromCityCenter = busyRadius + \
- ((1.0 - busyRadius) * (randgen.randint(0, 100000) / 100000))
- return distanceFromCityCenter, angleRadians
+ distance_from_city_center = busy_radius + \
+ ((1.0 - busy_radius) * (randgen.randint(0, 100000) / 100000))
+ return distance_from_city_center, angle_radians
-def parseNogoString(nogoLine: str) -> []:
+def parse_nogo_string(nogo_line: str) -> []:
"""Parses a line from locations_nogo.txt and returns the polygon
"""
- nogoLine = nogoLine.replace('\n', '').replace('\r', '')
- polygonStr = nogoLine.split(':', 1)[1]
- if ';' in polygonStr:
- pts = polygonStr.split(';')
+ nogo_line = remove_eol(nogo_line)
+ polygon_str = nogo_line.split(':', 1)[1]
+ if ';' in polygon_str:
+ pts = polygon_str.split(';')
else:
- pts = polygonStr.split(',')
+ pts = polygon_str.split(',')
if len(pts) <= 4:
return []
polygon = []
for index in range(int(len(pts)/2)):
if index*2 + 1 >= len(pts):
break
- longitudeStr = pts[index*2].strip()
- latitudeStr = pts[index*2 + 1].strip()
- if 'E' in latitudeStr or 'W' in latitudeStr:
- longitudeStr = pts[index*2 + 1].strip()
- latitudeStr = pts[index*2].strip()
- if 'E' in longitudeStr:
- longitudeStr = \
- longitudeStr.replace('E', '')
- longitude = float(longitudeStr)
- elif 'W' in longitudeStr:
- longitudeStr = \
- longitudeStr.replace('W', '')
- longitude = -float(longitudeStr)
+ longitude_str = pts[index*2].strip()
+ latitude_str = pts[index*2 + 1].strip()
+ if 'E' in latitude_str or 'W' in latitude_str:
+ longitude_str = pts[index*2 + 1].strip()
+ latitude_str = pts[index*2].strip()
+ if 'E' in longitude_str:
+ longitude_str = \
+ longitude_str.replace('E', '')
+ longitude = float(longitude_str)
+ elif 'W' in longitude_str:
+ longitude_str = \
+ longitude_str.replace('W', '')
+ longitude = -float(longitude_str)
else:
- longitude = float(longitudeStr)
- latitude = float(latitudeStr)
+ longitude = float(longitude_str)
+ latitude = float(latitude_str)
polygon.append([latitude, longitude])
return polygon
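
A hedged example of the line format parsed above: a place name, a colon, then coordinate values separated by ';' (or ','), with optional E/W suffixes marking longitudes (the coordinates here are invented):

    nogo_line = 'london:0.23902E;51.51412;0.23902E;51.412;0.012W;51.412'
    polygon = parse_nogo_string(nogo_line)
    # polygon is a list of [latitude, longitude] pairs,
    # with 'W' longitudes converted to negative values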
-def spoofGeolocation(baseDir: str,
- city: str, currTime, decoySeed: int,
- citiesList: [],
- nogoList: []) -> (float, float, str, str,
- str, str, int):
+def spoof_geolocation(base_dir: str,
+ city: str, curr_time, decoy_seed: int,
+ cities_list: [],
+ nogo_list: []) -> (float, float, str, str,
+ str, str, int):
"""Given a city and the current time spoofs the location
for an image
returns latitude, longitude, N/S, E/W,
camera make, camera model, camera serial number
"""
- locationsFilename = baseDir + '/custom_locations.txt'
- if not os.path.isfile(locationsFilename):
- locationsFilename = baseDir + '/locations.txt'
+ locations_filename = base_dir + '/custom_locations.txt'
+ if not os.path.isfile(locations_filename):
+ locations_filename = base_dir + '/locations.txt'
- nogoFilename = baseDir + '/custom_locations_nogo.txt'
- if not os.path.isfile(nogoFilename):
- nogoFilename = baseDir + '/locations_nogo.txt'
+ nogo_filename = base_dir + '/custom_locations_nogo.txt'
+ if not os.path.isfile(nogo_filename):
+ nogo_filename = base_dir + '/locations_nogo.txt'
- manCityRadius = 0.1
- varianceAtLocation = 0.0004
+ man_city_radius = 0.1
+ variance_at_location = 0.0004
default_latitude = 51.8744
default_longitude = 0.368333
default_latdirection = 'N'
default_longdirection = 'W'
- if citiesList:
- cities = citiesList
+ if cities_list:
+ cities = cities_list
else:
- if not os.path.isfile(locationsFilename):
+ if not os.path.isfile(locations_filename):
return (default_latitude, default_longitude,
default_latdirection, default_longdirection,
"", "", 0)
cities = []
- with open(locationsFilename, 'r') as f:
- cities = f.readlines()
+ try:
+ with open(locations_filename, 'r', encoding='utf-8') as loc_file:
+ cities = loc_file.readlines()
+ except OSError:
+ print('EX: unable to read locations ' + locations_filename)
nogo = []
- if nogoList:
- nogo = nogoList
+ if nogo_list:
+ nogo = nogo_list
else:
- if os.path.isfile(nogoFilename):
- with open(nogoFilename, 'r') as f:
- nogoList = f.readlines()
- for line in nogoList:
- if line.startswith(city + ':'):
- polygon = parseNogoString(line)
- if polygon:
- nogo.append(polygon)
+ if os.path.isfile(nogo_filename):
+ nogo_list = []
+ try:
+ with open(nogo_filename, 'r', encoding='utf-8') as nogo_file:
+ nogo_list = nogo_file.readlines()
+ except OSError:
+ print('EX: unable to read ' + nogo_filename)
+ for line in nogo_list:
+ if line.startswith(city + ':'):
+ polygon = parse_nogo_string(line)
+ if polygon:
+ nogo.append(polygon)
city = city.lower()
- for cityName in cities:
- if city in cityName.lower():
- cityFields = cityName.split(':')
- latitude = cityFields[1]
- longitude = cityFields[2]
- areaKm2 = 0
- if len(cityFields) > 3:
- areaKm2 = int(cityFields[3])
+ for city_name in cities:
+ if city in city_name.lower():
+ city_fields = city_name.split(':')
+ latitude = city_fields[1]
+ longitude = city_fields[2]
+ area_km2 = 0
+ if len(city_fields) > 3:
+ area_km2 = int(city_fields[3])
latdirection = 'N'
longdirection = 'E'
if 'S' in latitude:
@@ -232,99 +254,108 @@ def spoofGeolocation(baseDir: str,
latitude = float(latitude)
longitude = float(longitude)
# get the time of day at the city
- approxTimeZone = int(longitude / 15.0)
+ approx_time_zone = int(longitude / 15.0)
if longdirection == 'E':
- approxTimeZone = -approxTimeZone
- currTimeAdjusted = currTime - \
- datetime.timedelta(hours=approxTimeZone)
- camMake, camModel, camSerialNumber = \
- _getDecoyCamera(decoySeed)
- validCoord = False
- seedOffset = 0
- while not validCoord:
+ approx_time_zone = -approx_time_zone
+ curr_time_adjusted = curr_time - \
+ datetime.timedelta(hours=approx_time_zone)
+ cam_make, cam_model, cam_serial_number = \
+ _get_decoy_camera(decoy_seed)
+ valid_coord = False
+ seed_offset = 0
+ while not valid_coord:
# patterns of activity change in the city over time
- (distanceFromCityCenter, angleRadians) = \
- _getCityPulse(currTimeAdjusted, decoySeed + seedOffset)
+ (distance_from_city_center, angle_radians) = \
+ _get_city_pulse(curr_time_adjusted,
+ decoy_seed + seed_offset)
# The city radius value is in longitude and the reference
# is Manchester. Adjust for the radius of the chosen city.
- if areaKm2 > 1:
- manRadius = math.sqrt(1276 / math.pi)
- radius = math.sqrt(areaKm2 / math.pi)
- cityRadiusDeg = (radius / manRadius) * manCityRadius
+ if area_km2 > 1:
+ man_radius = math.sqrt(1276 / math.pi)
+ radius = math.sqrt(area_km2 / math.pi)
+ city_radius_deg = (radius / man_radius) * man_city_radius
else:
- cityRadiusDeg = manCityRadius
+ city_radius_deg = man_city_radius
# Get the position within the city, with some randomness added
latitude += \
- distanceFromCityCenter * cityRadiusDeg * \
- math.cos(angleRadians)
+ distance_from_city_center * city_radius_deg * \
+ math.cos(angle_radians)
longitude += \
- distanceFromCityCenter * cityRadiusDeg * \
- math.sin(angleRadians)
+ distance_from_city_center * city_radius_deg * \
+ math.sin(angle_radians)
longval = longitude
if longdirection == 'W':
longval = -longitude
- validCoord = not pointInNogo(nogo, latitude, longval)
- if not validCoord:
- seedOffset += 1
- if seedOffset > 100:
+ valid_coord = not point_in_nogo(nogo, latitude, longval)
+ if not valid_coord:
+ seed_offset += 1
+ if seed_offset > 100:
break
# add a small amount of variance around the location
fraction = randint(0, 100000) / 100000
- distanceFromLocation = fraction * fraction * varianceAtLocation
+ distance_from_location = fraction * fraction * variance_at_location
fraction = randint(0, 100000) / 100000
- angleFromLocation = fraction * 2 * math.pi
- latitude += distanceFromLocation * math.cos(angleFromLocation)
- longitude += distanceFromLocation * math.sin(angleFromLocation)
+ angle_from_location = fraction * 2 * math.pi
+ latitude += distance_from_location * math.cos(angle_from_location)
+ longitude += distance_from_location * math.sin(angle_from_location)
# gps locations aren't transcendental, so round to a fixed
# number of decimal places
latitude = int(latitude * 100000) / 100000.0
longitude = int(longitude * 100000) / 100000.0
return (latitude, longitude, latdirection, longdirection,
- camMake, camModel, camSerialNumber)
+ cam_make, cam_model, cam_serial_number)
return (default_latitude, default_longitude,
default_latdirection, default_longdirection,
"", "", 0)
-def getSpoofedCity(city: str, baseDir: str, nickname: str, domain: str) -> str:
+def get_spoofed_city(city: str, base_dir: str,
+ nickname: str, domain: str) -> str:
"""Returns the name of the city to use as a GPS spoofing location for
image metadata
"""
city = ''
- cityFilename = acctDir(baseDir, nickname, domain) + '/city.txt'
- if os.path.isfile(cityFilename):
- with open(cityFilename, 'r') as fp:
- city = fp.read().replace('\n', '')
+ city_filename = acct_dir(base_dir, nickname, domain) + '/city.txt'
+ if os.path.isfile(city_filename):
+ try:
+ with open(city_filename, 'r', encoding='utf-8') as city_file:
+ city1 = city_file.read()
+ city = remove_eol(city1)
+ except OSError:
+ print('EX: unable to read ' + city_filename)
return city
-def _pointInPolygon(poly: [], x: float, y: float) -> bool:
+def _point_in_polygon(poly: [], x_coord: float, y_coord: float) -> bool:
"""Returns true if the given point is inside the given polygon
"""
- n = len(poly)
+ num = len(poly)
inside = False
p2x = 0.0
p2y = 0.0
xints = 0.0
p1x, p1y = poly[0]
- for i in range(n + 1):
- p2x, p2y = poly[i % n]
- if y > min(p1y, p2y):
- if y <= max(p1y, p2y):
- if x <= max(p1x, p2x):
+ for i in range(num + 1):
+ p2x, p2y = poly[i % num]
+ if y_coord > min(p1y, p2y):
+ if y_coord <= max(p1y, p2y):
+ if x_coord <= max(p1x, p2x):
if p1y != p2y:
- xints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
- if p1x == p2x or x <= xints:
+ xints = \
+ (y_coord - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
+ if p1x == p2x or x_coord <= xints:
inside = not inside
p1x, p1y = p2x, p2y
return inside
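
The loop above is a standard ray-casting point-in-polygon test. A small worked check of the renamed helper, using a unit square:

    square = [[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0]]
    assert _point_in_polygon(square, 0.5, 0.5)        # inside
    assert not _point_in_polygon(square, 1.5, 0.5)    # outside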
-def pointInNogo(nogo: [], latitude: float, longitude: float) -> bool:
+def point_in_nogo(nogo: [], latitude: float, longitude: float) -> bool:
+ """Returns true of the given geolocation is within a nogo area
+ """
for polygon in nogo:
- if _pointInPolygon(polygon, latitude, longitude):
+ if _point_in_polygon(polygon, latitude, longitude):
return True
return False
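
A hedged usage sketch of the renamed module as a whole, assuming a cities list entry in the 'NAME:latitude:longitude:areaKm2' format parsed by spoof_geolocation above (the London figures are illustrative):

    import datetime
    from city import spoof_geolocation

    cities_list = ['LONDON, ENGLAND:51.5074:0.1278W:1738']
    curr_time = datetime.datetime.utcnow()
    (latitude, longitude, lat_dir, long_dir,
     cam_make, cam_model, cam_serial) = \
        spoof_geolocation('', 'london, england', curr_time,
                          1234, cities_list, [])
    # plausible coordinates within the city, plus decoy camera metadata
    print(str(latitude) + lat_dir + ' ' + str(longitude) + long_dir)
    print(cam_make + ' ' + cam_model + ' ' + str(cam_serial))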
diff --git a/code-of-conduct.md b/code-of-conduct.md
index 83ed3d4d1..5dc766847 100644
--- a/code-of-conduct.md
+++ b/code-of-conduct.md
@@ -38,7 +38,7 @@ No insults, harassment (sexual or otherwise), condescension, ad hominem, threats
Condescension means treating others as inferior. Subtle condescension still violates the Code of Conduct even if not blatantly demeaning.
-No stereotyping of or promoting prejudice or discrimination against particular groups or classes/castes of people, including sexism, racism, homophobia, transphobia, age discrimination or discrimination based upon nationality.
+No stereotyping of or promoting prejudice or discrimination against particular groups or classes/castes of people, including sexism, racism, homophobia, transphobia, denying people their right to join or create a trade union, age discrimination or discrimination based upon nationality.
In cases where criticism of ideology or culture remains on-topic, respectfully discuss the ideas.
diff --git a/content.py b/content.py
index fd3fb7626..b7e386de9 100644
--- a/content.py
+++ b/content.py
@@ -1,50 +1,103 @@
__filename__ = "content.py"
__author__ = "Bob Mottram"
__license__ = "AGPL3+"
-__version__ = "1.2.0"
+__version__ = "1.3.0"
__maintainer__ = "Bob Mottram"
__email__ = "bob@libreserver.org"
__status__ = "Production"
__module_group__ = "Core"
+import difflib
+import math
+import html
import os
import email.parser
import urllib.parse
from shutil import copyfile
-from utils import dangerousSVG
-from utils import removeDomainPort
-from utils import isValidLanguage
-from utils import getImageExtensions
-from utils import loadJson
-from utils import fileLastModified
-from utils import getLinkPrefixes
-from utils import dangerousMarkup
-from utils import isPGPEncrypted
-from utils import containsPGPPublicKey
-from utils import acctDir
-from utils import isfloat
-from utils import getCurrencies
-from petnames import getPetName
+from dateutil.parser import parse
+from utils import get_user_paths
+from utils import convert_published_to_local_timezone
+from utils import has_object_dict
+from utils import valid_hash_tag
+from utils import dangerous_svg
+from utils import remove_domain_port
+from utils import get_image_extensions
+from utils import load_json
+from utils import save_json
+from utils import file_last_modified
+from utils import get_link_prefixes
+from utils import dangerous_markup
+from utils import is_pgp_encrypted
+from utils import contains_pgp_public_key
+from utils import acct_dir
+from utils import is_float
+from utils import get_currencies
+from utils import remove_html
+from utils import remove_eol
+from petnames import get_pet_name
+from session import download_image
+
+MUSIC_SITES = ('soundcloud.com', 'bandcamp.com')
+
+MAX_LINK_LENGTH = 40
+
+REMOVE_MARKUP = (
+ 'b', 'i', 'ul', 'ol', 'li', 'em', 'strong',
+ 'blockquote', 'h1', 'h2', 'h3', 'h4', 'h5'
+)
+
+INVALID_CONTENT_STRINGS = (
+ 'mute', 'unmute', 'editeventpost', 'notifypost',
+ 'delete', 'options', 'page', 'repeat',
+ 'bm', 'tl', 'actor', 'unrepeat', 'eventid',
+ 'unannounce', 'like', 'unlike', 'bookmark',
+ 'unbookmark', 'likedBy', 'time',
+ 'year', 'month', 'day', 'editnewpost',
+ 'graph', 'showshare', 'category', 'showwanted',
+ 'rmshare', 'rmwanted', 'repeatprivate',
+ 'unrepeatprivate', 'replyto',
+ 'replyfollowers', 'replydm', 'replychat', 'editblogpost',
+ 'handle', 'blockdomain'
+)
-def removeHtmlTag(htmlStr: str, tag: str) -> str:
+def valid_url_lengths(content: str, max_url_length: int) -> bool:
+ """Returns true if the given content contains urls which are too long
+ """
+ if '://' not in content:
+ return True
+ sections = content.split('://')
+ ctr = 0
+ for text in sections:
+ if ctr == 0:
+ ctr += 1
+ continue
+ if '"' in text:
+ url = text.split('"')[0]
+ if '<' not in url and '>' not in url:
+ if len(url) > max_url_length:
+ return False
+ return True
+
+
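
A hedged example of the new check: an anchor whose target exceeds the limit causes the whole content to be rejected, while content without links always passes:

    content = '<a href="https://example.com/' + ('x' * 100) + '">link</a>'
    assert not valid_url_lengths(content, 40)
    assert valid_url_lengths('<p>no links here</p>', 40)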
+def remove_html_tag(html_str: str, tag: str) -> str:
"""Removes a given tag from a html string
"""
- tagFound = True
- while tagFound:
- matchStr = ' ' + tag + '="'
- if matchStr not in htmlStr:
- tagFound = False
+ tag_found = True
+ while tag_found:
+ match_str = ' ' + tag + '="'
+ if match_str not in html_str:
+ tag_found = False
break
- sections = htmlStr.split(matchStr, 1)
+ sections = html_str.split(match_str, 1)
if '"' not in sections[1]:
- tagFound = False
+ tag_found = False
break
- htmlStr = sections[0] + sections[1].split('"', 1)[1]
- return htmlStr
+ html_str = sections[0] + sections[1].split('"', 1)[1]
+ return html_str
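
For example, the renamed helper strips an attribute and its value while leaving the element itself in place:

    html_str = '<span style="color: red">alert</span>'
    assert remove_html_tag(html_str, 'style') == '<span>alert</span>'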
-def _removeQuotesWithinQuotes(content: str) -> str:
+def _remove_quotes_within_quotes(content: str) -> str:
"""Removes any blockquote inside blockquote
"""
     if '<blockquote>' not in content:
@@ -55,25 +108,25 @@ def _removeQuotesWithinQuotes(content: str) -> str:
     found = True
     while found:
         prefix = content.split('<blockquote>', ctr)[0] + '<blockquote>'
-        quotedStr = content.split('<blockquote>', ctr)[1]
-        if '</blockquote>' not in quotedStr:
+        quoted_str = content.split('<blockquote>', ctr)[1]
+        if '</blockquote>' not in quoted_str:
             found = False
         else:
-            endStr = quotedStr.split('</blockquote>')[1]
-            quotedStr = quotedStr.split('</blockquote>')[0]
-            if '<blockquote>' not in endStr:
+            end_str = quoted_str.split('</blockquote>')[1]
+            quoted_str = quoted_str.split('</blockquote>')[0]
+            if '<blockquote>' not in end_str:
                 found = False
-            if '<blockquote>' in quotedStr:
-                quotedStr = quotedStr.replace('<blockquote>', '')
-                content = prefix + quotedStr + '</blockquote>' + endStr
+            if '<blockquote>' in quoted_str:
+                quoted_str = quoted_str.replace('<blockquote>', '')
+                content = prefix + quoted_str + '</blockquote>' + end_str
         ctr += 1
     return content
-def htmlReplaceEmailQuote(content: str) -> str:
+def html_replace_email_quote(content: str) -> str:
"""Replaces an email style quote "> Some quote" with html blockquote
"""
- if isPGPEncrypted(content) or containsPGPPublicKey(content):
+ if is_pgp_encrypted(content) or contains_pgp_public_key(content):
return content
# replace quote paragraph
     if '<p>&quot;' in content:
@@ -89,34 +142,34 @@ def htmlReplaceEmailQuote(content: str) -> str:
# replace email style quote
if '>> ' not in content:
return content
-    contentStr = content.replace('<p>', '')
-    contentLines = contentStr.split('</p>')
-    newContent = ''
-    for lineStr in contentLines:
-        if not lineStr:
+    content_str = content.replace('<p>', '')
+    content_lines = content_str.split('</p>')
+    new_content = ''
+    for line_str in content_lines:
+        if not line_str:
             continue
-        if '>> ' not in lineStr:
-            if lineStr.startswith('> '):
-                lineStr = lineStr.replace('> ', '<blockquote>')
words = content.split(' ')
- if not longWordsList:
- longWordsList = []
- for wordStr in words:
- if len(wordStr) > maxWordLength:
- if wordStr not in longWordsList:
- longWordsList.append(wordStr)
- for wordStr in longWordsList:
-        if wordStr.startswith('<p>'):
-            wordStr = wordStr.replace('<p>', '')
- if wordStr.startswith('<'):
+ if not long_words_list:
+ long_words_list = []
+ for word_str in words:
+ if len(word_str) > max_word_length:
+ if word_str not in long_words_list:
+ long_words_list.append(word_str)
+ for word_str in long_words_list:
+        if word_str.startswith('<p>'):
+            word_str = word_str.replace('<p>', '')
+ if word_str.startswith('<'):
continue
- if len(wordStr) == 76:
- if wordStr.upper() == wordStr:
+ if len(word_str) == 76:
+ if word_str.upper() == word_str:
# tox address
continue
- if '=\"' in wordStr:
+ if '=\"' in word_str:
continue
- if '@' in wordStr:
- if '@@' not in wordStr:
+ if '@' in word_str:
+ if '@@' not in word_str:
continue
- if '=.ed25519' in wordStr:
+ if '=.ed25519' in word_str:
continue
- if '.onion' in wordStr:
+ if '.onion' in word_str:
continue
- if '.i2p' in wordStr:
+ if '.i2p' in word_str:
continue
- if 'https:' in wordStr:
+ if 'https:' in word_str:
continue
- elif 'http:' in wordStr:
+ if 'http:' in word_str:
continue
- elif 'i2p:' in wordStr:
+ if 'i2p:' in word_str:
continue
- elif 'gnunet:' in wordStr:
+ if 'gnunet:' in word_str:
continue
- elif 'dat:' in wordStr:
+ if 'dat:' in word_str:
continue
- elif 'rad:' in wordStr:
+ if 'rad:' in word_str:
continue
- elif 'hyper:' in wordStr:
+ if 'hyper:' in word_str:
continue
- elif 'briar:' in wordStr:
+ if 'briar:' in word_str:
continue
- if '<' in wordStr:
- replaceWord = wordStr.split('<', 1)[0]
- # if len(replaceWord) > maxWordLength:
- # replaceWord = replaceWord[:maxWordLength]
- content = content.replace(wordStr, replaceWord)
- wordStr = replaceWord
- if '/' in wordStr:
+ if '<' in word_str:
+ replace_word = word_str.split('<', 1)[0]
+ # if len(replace_word) > max_word_length:
+ # replace_word = replace_word[:max_word_length]
+ content = content.replace(word_str, replace_word)
+ word_str = replace_word
+ if '/' in word_str:
continue
- if len(wordStr[maxWordLength:]) < maxWordLength:
- content = content.replace(wordStr,
- wordStr[:maxWordLength] + '\n' +
- wordStr[maxWordLength:])
+ if len(word_str[max_word_length:]) < max_word_length:
+ content = content.replace(word_str,
+ word_str[:max_word_length] + '\n' +
+ word_str[max_word_length:])
else:
- content = content.replace(wordStr,
- wordStr[:maxWordLength])
+ content = content.replace(word_str,
+ word_str[:max_word_length])
     if content.startswith('<p>'):
+ ontology_json['@graph'][index]["rdfs:label"].append({
+ "@value": translated_str,
"@language": lang
})
changed = True
if not changed:
continue
- saveJson(ontologyJson, filename + '.new')
+ save_json(ontology_json, filename + '.new')
-def _testCanReplyTo(baseDir: str) -> None:
- print('testCanReplyTo')
- systemLanguage = 'en'
+def _test_can_replyto(base_dir: str) -> None:
+ print('test_can_reply_to')
+ system_language = 'en'
+ languages_understood = [system_language]
nickname = 'test27637'
domain = 'rando.site'
port = 443
- httpPrefix = 'https'
+ http_prefix = 'https'
content = 'This is a test post with links.\n\n' + \
'ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/v4/\n\nhttps://libreserver.org'
- followersOnly = False
- saveToFile = False
- clientToServer = False
- commentsEnabled = True
- attachImageFilename = None
- mediaType = None
- imageDescription = None
+ save_to_file = False
+ client_to_server = False
+ comments_enabled = True
+ attach_image_filename = None
+ media_type = None
+ image_description = None
city = 'London, England'
- testInReplyTo = None
- testInReplyToAtomUri = None
- testSubject = None
- testSchedulePost = False
- testEventDate = None
- testEventTime = None
- testLocation = None
- testIsArticle = False
- conversationId = None
- lowBandwidth = True
+ test_in_reply_to = None
+ test_in_reply_to_atom_uri = None
+ test_subject = None
+ test_schedule_post = False
+ test_event_date = None
+ test_event_time = None
+ test_event_end_time = None
+ test_location = None
+ test_is_article = False
+ conversation_id = None
+ low_bandwidth = True
+ content_license_url = 'https://creativecommons.org/licenses/by/4.0'
+ translate = {}
- postJsonObject = \
- createPublicPost(baseDir, nickname, domain, port, httpPrefix,
- content, followersOnly, saveToFile,
- clientToServer, commentsEnabled,
- attachImageFilename, mediaType,
- imageDescription, city,
- testInReplyTo, testInReplyToAtomUri,
- testSubject, testSchedulePost,
- testEventDate, testEventTime, testLocation,
- testIsArticle, systemLanguage, conversationId,
- lowBandwidth)
+ post_json_object = \
+ create_public_post(base_dir, nickname, domain, port, http_prefix,
+ content, save_to_file,
+ client_to_server, comments_enabled,
+ attach_image_filename, media_type,
+ image_description, city,
+ test_in_reply_to, test_in_reply_to_atom_uri,
+ test_subject, test_schedule_post,
+ test_event_date, test_event_time,
+ test_event_end_time, test_location,
+ test_is_article, system_language, conversation_id,
+ low_bandwidth, content_license_url,
+ languages_understood, translate)
# set the date on the post
- currDateStr = "2021-09-08T20:45:00Z"
- postJsonObject['published'] = currDateStr
- postJsonObject['object']['published'] = currDateStr
+ curr_date_str = "2021-09-08T20:45:00Z"
+ post_json_object['published'] = curr_date_str
+ post_json_object['object']['published'] = curr_date_str
# test a post within the reply interval
- postUrl = postJsonObject['object']['id']
- replyIntervalHours = 2
- currDateStr = "2021-09-08T21:32:10Z"
- assert canReplyTo(baseDir, nickname, domain,
- postUrl, replyIntervalHours,
- currDateStr,
- postJsonObject)
+ post_url = post_json_object['object']['id']
+ reply_interval_hours = 2
+ curr_date_str = "2021-09-08T21:32:10Z"
+ assert can_reply_to(base_dir, nickname, domain,
+ post_url, reply_interval_hours,
+ curr_date_str,
+ post_json_object)
# test a post outside of the reply interval
- currDateStr = "2021-09-09T09:24:47Z"
- assert not canReplyTo(baseDir, nickname, domain,
- postUrl, replyIntervalHours,
- currDateStr,
- postJsonObject)
+ curr_date_str = "2021-09-09T09:24:47Z"
+ assert not can_reply_to(base_dir, nickname, domain,
+ post_url, reply_interval_hours,
+ curr_date_str,
+ post_json_object)
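
The two assertions above bracket the configured two hour window. A quick check of the date arithmetic, independent of can_reply_to:

    from datetime import datetime

    fmt = '%Y-%m-%dT%H:%M:%SZ'
    published = datetime.strptime('2021-09-08T20:45:00Z', fmt)
    within = datetime.strptime('2021-09-08T21:32:10Z', fmt)
    outside = datetime.strptime('2021-09-09T09:24:47Z', fmt)
    assert (within - published).total_seconds() / 3600 < 2    # 47 minutes
    assert (outside - published).total_seconds() / 3600 > 2   # ~12.7 hours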
-def runAllTests():
- baseDir = os.getcwd()
+def _test_seconds_between_publish() -> None:
+ print('test_seconds_between_published')
+ published1 = "2021-10-14T09:39:27Z"
+ published2 = "2021-10-14T09:41:28Z"
+
+ seconds_elapsed = seconds_between_published(published1, published2)
+ assert seconds_elapsed == 121
+ # invalid date
+ published2 = "2021-10-14N09:41:28Z"
+ seconds_elapsed = seconds_between_published(published1, published2)
+ assert seconds_elapsed == -1
+
+
+def _test_word_similarity() -> None:
+ print('test_words_similarity')
+ min_words = 10
+ content1 = "This is the same"
+ content2 = "This is the same"
+ assert words_similarity(content1, content2, min_words) == 100
+ content1 = "This is our world now... " + \
+ "the world of the electron and the switch, the beauty of the baud"
+ content2 = "This is our world now. " + \
+ "The world of the electron and the webkit, the beauty of the baud"
+ similarity = words_similarity(content1, content2, min_words)
+ assert similarity > 70
+ content1 = "
We're growing!
A new denizen " + \
+ "is frequenting HackBucket. You probably know him already " + \
+ "from her epic typos - but let's not spoil too much " + \
+ "\ud83d\udd2e
"
+ content2 = "
We're growing!
A new denizen " + \
+ "is frequenting HackBucket. You probably know them already " + \
+ "from their epic typos - but let's not spoil too much " + \
+ "\ud83d\udd2e
'
+ if text_bold != expected:
+ print(text_bold)
+ assert text_bold == expected
+
+
+def _test_diff_content() -> None:
+ print('diff_content')
+ prev_content = \
+ 'Some text before.\n' + \
+ 'Starting sentence. This is some content.\nThis is another line.'
+ content = \
+ 'Some text before.\nThis is some more content.\nThis is another line.'
+ result = content_diff(content, prev_content)
+ expected = \
+ '
' + \
+ '- Starting sentence ' + \
+ '+ This is some more content ' + \
+ '- This is some content
'
+ assert result == expected
+
+ content = \
+ 'Some text before.\nThis is content.\nThis line.'
+ result = content_diff(content, prev_content)
+ expected = \
+ '
- Starting sentence ' + \
+ '+ This is content ' + \
+ '- This is some content ' + \
+ '+ This line ' + \
+ '- This is another line
' + \
+ '+ This is some content ' + \
+ '- This is some previous content ' + \
+ '+ Some other content ' + \
+ '- Some other previous content' + \
+ '
Mon Dec 14, 01:05
' + \
+ '+ This is some previous content' + \
+ ' ' + \
+ '- This is some more previous content
'
+ assert html_str == expected
+
+
+def _test_color_contrast_value(base_dir: str) -> None:
+ print('test_color_contrast_value')
+ minimum_color_contrast = 4.5
+ background = 'black'
+ foreground = 'white'
+ contrast = color_contrast(background, foreground)
+ assert contrast
+ assert contrast > 20
+ assert contrast < 22
+ foreground = 'grey'
+ contrast = color_contrast(background, foreground)
+ assert contrast
+ assert contrast > 5
+ assert contrast < 6
+ themes = get_themes_list(base_dir)
+ for theme_name in themes:
+ theme_filename = base_dir + '/theme/' + theme_name + '/theme.json'
+ if not os.path.isfile(theme_filename):
+ continue
+ theme_json = load_json(theme_filename)
+ if not theme_json:
+ continue
+ if not theme_json.get('main-fg-color'):
+ continue
+ if not theme_json.get('main-bg-color'):
+ continue
+ foreground = theme_json['main-fg-color']
+ background = theme_json['main-bg-color']
+ contrast = color_contrast(background, foreground)
+ if contrast is None:
+ continue
+ if contrast < minimum_color_contrast:
+            print('Theme ' + theme_name +
+                  ' has insufficient color contrast ' +
+                  str(contrast) + ' < ' + str(minimum_color_contrast))
+ assert contrast >= minimum_color_contrast
+ print('Color contrast is ok for all themes')
+
+
+def _test_remove_end_of_line():
+ print('remove_end_of_line')
+ text = 'some text\r\n'
+ expected = 'some text'
+ assert remove_eol(text) == expected
+ text = 'some text'
+ assert remove_eol(text) == expected
+
+
+def _test_dogwhistles():
+ print('dogwhistles')
+ dogwhistles = {
+ "X-hamstered": "hamsterism",
+ "gerbil": "rodent",
+ "*snake": "slither",
+ "start*end": "something"
+ }
+ content = 'This text does not contain any dogwhistles'
+ assert not detect_dogwhistles(content, dogwhistles)
+ content = 'A gerbil named joe'
+ assert detect_dogwhistles(content, dogwhistles)
+ content = 'A rattlesnake.'
+ assert detect_dogwhistles(content, dogwhistles)
+ content = 'A startthingend.'
+ assert detect_dogwhistles(content, dogwhistles)
+ content = 'This content is unhamstered and yhamstered.'
+ result = detect_dogwhistles(content, dogwhistles)
+ assert result
+ assert result.get('hamstered')
+ assert result['hamstered']['count'] == 2
+ assert result['hamstered']['category'] == "hamsterism"
+
+
+def _test_text_standardize():
+ print('text_standardize')
+ expected = 'This is a test'
+
+ result = standardize_text(expected)
+ if result != expected:
+ print(result)
+ assert result == expected
+
+ text = '𝔗𝔥𝔦𝔰 𝔦𝔰 𝔞 𝔱𝔢𝔰𝔱'
+ result = standardize_text(text)
+ if result != expected:
+ print(result)
+ assert result == expected
+
+ text = '𝕿𝖍𝖎𝖘 𝖎𝖘 𝖆 𝖙𝖊𝖘𝖙'
+ result = standardize_text(text)
+ if result != expected:
+ print(result)
+ assert result == expected
+
+ text = '𝓣𝓱𝓲𝓼 𝓲𝓼 𝓪 𝓽𝓮𝓼𝓽'
+ result = standardize_text(text)
+ if result != expected:
+ print(result)
+ assert result == expected
+
+ text = '𝒯𝒽𝒾𝓈 𝒾𝓈 𝒶 𝓉𝑒𝓈𝓉'
+ result = standardize_text(text)
+ if result != expected:
+ print(result)
+ assert result == expected
+
+ text = '𝕋𝕙𝕚𝕤 𝕚𝕤 𝕒 𝕥𝕖𝕤𝕥'
+ result = standardize_text(text)
+ if result != expected:
+ print(result)
+ assert result == expected
+
+ text = 'This is a test'
+ result = standardize_text(text)
+ if result != expected:
+ print(result)
+ assert result == expected
+
+
+def _test_combine_lines():
+ print('combine_lines')
+ text = 'This is a test'
+ expected = text
+ result = combine_textarea_lines(text)
+ if result != expected:
+ print('expected: ' + expected)
+ print('result: ' + result)
+ assert result == expected
+
+ text = 'First line.\n\nSecond line.'
+    expected = 'First line.</p><p>Second line.'
+ result = combine_textarea_lines(text)
+ if result != expected:
+ print('expected: ' + expected)
+ print('result: ' + result)
+ assert result == expected
+
+ text = 'First\nline.\n\nSecond\nline.'
+    expected = 'First line.</p><p>Second line.'
+ result = combine_textarea_lines(text)
+ if result != expected:
+ print('expected: ' + expected)
+ print('result: ' + result)
+ assert result == expected
+
+ # with extra space
+ text = 'First\nline.\n\nSecond \nline.'
+    expected = 'First line.</p><p>Second line.'
+ result = combine_textarea_lines(text)
+ if result != expected:
+ print('expected: ' + expected)
+ print('result: ' + result)
+ assert result == expected
+
+ text = 'Introduction blurb.\n\n* List item 1\n' + \
+ '* List item 2\n* List item 3\n\nFinal blurb.'
+    expected = 'Introduction blurb.</p><p>* List item 1\n' + \
+        '* List item 2\n* List item 3</p><p>Final blurb.'
+ result = combine_textarea_lines(text)
+ if result != expected:
+ print('expected: ' + expected)
+ print('result: ' + result)
+ assert result == expected
+
+
+def _test_hashtag_maps():
+ print('hashtag_maps')
+ content = \
+ "
This is a test, with a couple of links and a " + \
+ "#" + \
+ "Hashtag
\n'
if newswire and path.endswith('/newblog'):
- newPostForm += '
\n'
- newPostForm += '
\n'
+ new_post_form += '
\n'
+ new_post_form += '
\n'
else:
- newPostForm += '
\n'
- newPostForm += '
\n'
- newPostForm += '
\n'
- newPostForm += '
' + dropDownContent + '
\n'
+ new_post_form += '
\n'
+ new_post_form += ' \n'
+ new_post_form += '
\n'
+ new_post_form += '
' + drop_down_content + '
\n'
- newPostForm += \
- '
\n'
# for a new blog if newswire items exist then add a citations button
if newswire and path.endswith('/newblog'):
- newPostForm += \
+ new_post_form += \
'
\n'
- newPostForm += replyStr
- if mediaInstance and not replyStr:
- newPostForm += newPostImageSection
+ new_post_form += reply_str
+ if media_instance and not reply_str:
+ new_post_form += new_post_image_section
- if not shareDescription:
- shareDescription = ''
- newPostForm += \
- editTextField(placeholderSubject, 'subject', shareDescription)
- newPostForm += ''
+ if not share_description:
+ share_description = ''
- selectedStr = ' selected'
- if inReplyTo or endpoint == 'newdm':
- if inReplyTo:
- newPostForm += \
- ' ' + placeholderMentions + \
+ # for reminders show the date and time at the top
+ if is_new_reminder:
+ new_post_form += '
\n"
return content
- videoSite = 'https://www.youtube.com'
- if '"' + videoSite in content:
- url = content.split('"' + videoSite)[1]
+ video_site = 'https://www.youtube.com'
+ if 'https://m.youtube.com' in content:
+ content = content.replace('https://m.youtube.com', video_site)
+ if '"' + video_site in content:
+ url = content.split('"' + video_site)[1]
if '"' in url:
- url = url.split('"')[0].replace('/watch?v=', '/embed/')
- if '&' in url:
- url = url.split('&')[0]
- if '?utm_' in url:
- url = url.split('?utm_')[0]
- content = \
- content + "
\n\n
\n"
- return content
+ url = url.split('"')[0]
+ if '/channel/' not in url and '/playlist' not in url:
+ url = url.replace('/watch?v=', '/embed/')
+ if '&' in url:
+ url = url.split('&')[0]
+ if '?utm_' in url:
+ url = url.split('?utm_')[0]
+ content += \
+ "
\n\n" + \
+ "\n" + \
+ "
\n"
+ return content
- invidiousSites = ('https://invidious.snopyta.org',
- 'https://yewtu.be',
- 'https://tube.connect.cafe',
- 'https://invidious.kavin.rocks',
- 'https://invidiou.site',
- 'https://invidious.tube',
- 'https://invidious.xyz',
- 'https://invidious.zapashcanon.fr',
- 'http://c7hqkpkpemu6e7emz5b4vy' +
- 'z7idjgdvgaaa3dyimmeojqbgpea3xqjoid.onion',
- 'http://axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4' +
- 'bzzsg2ii4fv2iid.onion')
- for videoSite in invidiousSites:
- if '"' + videoSite in content:
- url = content.split('"' + videoSite)[1]
+ video_site = 'https://youtu.be/'
+ if '"' + video_site in content:
+ url = content.split('"' + video_site)[1]
+ if '"' in url:
+ url = url.split('"')[0]
+ if '/channel/' not in url and '/playlist' not in url:
+ url = 'embed/' + url
+ if '&' in url:
+ url = url.split('&')[0]
+ if '?utm_' in url:
+ url = url.split('?utm_')[0]
+ video_site = 'https://www.youtube.com/'
+ content += \
+ "
\n\n" + \
+ "\n" + \
+ "
\n"
+ return content
+
+ invidious_sites = (
+ 'https://invidious.snopyta.org',
+ 'https://yewtu.be',
+ 'https://tube.connect.cafe',
+ 'https://invidious.kavin.rocks',
+ 'https://invidiou.site',
+ 'https://invidious.tube',
+ 'https://invidious.xyz',
+ 'https://invidious.zapashcanon.fr',
+ 'http://c7hqkpkpemu6e7emz5b4vy' +
+ 'z7idjgdvgaaa3dyimmeojqbgpea3xqjoid.onion',
+ 'http://axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4' +
+ 'bzzsg2ii4fv2iid.onion'
+ )
+ for video_site in invidious_sites:
+ if '"' + video_site in content:
+ url = content.split('"' + video_site)[1]
if '"' in url:
url = url.split('"')[0].replace('/watch?v=', '/embed/')
if '&' in url:
url = url.split('&')[0]
if '?utm_' in url:
url = url.split('?utm_')[0]
- content = \
- content + "
\n\n\n" + \
+ "\n
\n"
+ "\" frameborder=\"0\" allow=\"fullscreen\" " + \
+ "allowfullscreen tabindex=\"10\">\n" + \
+ "\n\n"
return content
- videoSite = 'https://media.ccc.de'
- if '"' + videoSite in content:
- url = content.split('"' + videoSite)[1]
+ video_site = 'https://media.ccc.de'
+ if '"' + video_site in content:
+ url = content.split('"' + video_site)[1]
if '"' in url:
url = url.split('"')[0]
+ video_site_settings = ''
+ if '#' in url:
+ video_site_settings = '#' + url.split('#', 1)[1]
+ url = url.split('#')[0]
if not url.endswith('/oembed'):
url = url + '/oembed'
- content = \
- content + "
\n\n\n" + \
+ "\n
\n"
+ "allowfullscreen tabindex=\"10\">\n" + \
+ "\n\n"
return content
if '"https://' in content:
- if peertubeInstances:
+ if peertube_instances:
# only create an embedded video for a limited set of
# peertube sites.
- peerTubeSites = peertubeInstances
+ peertube_sites = peertube_instances
else:
# A default minimal set of peertube instances
# Also see https://peertube_isolation.frama.io/list/ for
# adversarial instances. Nothing in that list should be
# in the defaults below.
- peerTubeSites = ('share.tube',
- 'visionon.tv',
- 'peertube.fr',
- 'kolektiva.media',
- 'peertube.social',
- 'videos.lescommuns.org')
- for site in peerTubeSites:
+ peertube_sites = (
+ 'share.tube',
+ 'visionon.tv',
+ 'anarchy.tube',
+ 'peertube.fr',
+ 'video.nerdcave.site',
+ 'kolektiva.media',
+ 'peertube.social',
+ 'videos.lescommuns.org'
+ )
+ for site in peertube_sites:
site = site.strip()
if not site:
continue
@@ -133,38 +197,67 @@ def _addEmbeddedVideoFromSites(translate: {}, content: str,
continue
if '.' not in site:
continue
- siteStr = site
+ site_str = site
if site.startswith('http://'):
site = site.replace('http://', '')
elif site.startswith('https://'):
site = site.replace('https://', '')
if site.endswith('.onion') or site.endswith('.i2p'):
- siteStr = 'http://' + site
+ site_str = 'http://' + site
else:
- siteStr = 'https://' + site
- siteStr = '"' + siteStr
- if siteStr not in content:
+ site_str = 'https://' + site
+ site_str = '"' + site_str
+ if site_str not in content:
continue
- url = content.split(siteStr)[1]
+ url = content.split(site_str)[1]
if '"' not in url:
continue
- url = url.split('"')[0].replace('/watch/', '/embed/')
- content = \
- content + "
\n\n\n" + \
+ "\n
\n"
+ "\" frameborder=\"0\" allow=\"" + \
+ "fullscreen\" allowfullscreen tabindex=\"10\">' + \
+ '\n" + \
+ "\n\n"
return content
return content
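
Each branch above normalises a watch url into an embeddable one. The rewrite in isolation:

    url = '/watch?v=abc123&t=42'
    url = url.replace('/watch?v=', '/embed/')
    if '&' in url:
        url = url.split('&')[0]
    assert url == '/embed/abc123'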
-def _addEmbeddedAudio(translate: {}, content: str) -> str:
- """Adds embedded audio for mp3/ogg
+def _add_embedded_audio(translate: {}, content: str) -> str:
+ """Adds embedded audio for mp3/ogg/opus
"""
- if not ('.mp3' in content or '.ogg' in content):
+ if not ('.mp3' in content or
+ '.ogg' in content or
+ '.opus' in content or
+ '.flac' in content):
return content
if '\n\n\n'
return content
-def _addEmbeddedVideo(translate: {}, content: str) -> str:
+def _add_embedded_video(translate: {}, content: str) -> str:
"""Adds embedded video for mp4/webm/ogv
"""
if not ('.mp4' in content or '.webm' in content or '.ogv' in content):
@@ -217,39 +315,40 @@ def _addEmbeddedVideo(translate: {}, content: str) -> str:
extension = '.ogv'
words = content.strip('\n').split(' ')
- for w in words:
- if extension not in w:
+ for wrd in words:
+ if extension not in wrd:
continue
- w = w.replace('href="', '').replace('">', '')
- if w.endswith('.'):
- w = w[:-1]
- if w.endswith('"'):
- w = w[:-1]
- if w.endswith(';'):
- w = w[:-1]
- if w.endswith(':'):
- w = w[:-1]
- if not w.endswith(extension):
+ wrd = wrd.replace('href="', '').replace('">', '')
+ if wrd.endswith('.'):
+ wrd = wrd[:-1]
+ if wrd.endswith('"'):
+ wrd = wrd[:-1]
+ if wrd.endswith(';'):
+ wrd = wrd[:-1]
+ if wrd.endswith(':'):
+ wrd = wrd[:-1]
+ if not wrd.endswith(extension):
continue
- if not validUrlPrefix(w):
+ if not valid_url_prefix(wrd):
continue
content += \
- '
\n'
+ return podcast_str
+
+
+def _html_podcast_performers(podcast_properties: {}) -> str:
+ """Returns html for performers of a podcast
+ """
+ if not podcast_properties:
+ return ''
+ key = 'persons'
+ if not podcast_properties.get(key):
+ return ''
+ if not isinstance(podcast_properties[key], list):
+ return ''
+
+ # list of performers
+ podcast_str = '
\n'
+ return podcast_str
+
+
+def _html_podcast_soundbites(link_url: str, extension: str,
+ podcast_properties: {},
+ translate: {}) -> str:
+ """Returns html for podcast soundbites
+ """
+ if not podcast_properties:
+ return ''
+ if not podcast_properties.get('soundbites'):
+ return ''
+
+ podcast_str = '
\n'
+ podcast_str += '
\n'
+ podcast_str += '
\n'
+ ctr = 1
+ for performer in podcast_properties['soundbites']:
+ if not performer.get('startTime'):
+ continue
+ if not performer['startTime'].isdigit():
+ continue
+ if not performer.get('duration'):
+ continue
+ if not performer['duration'].isdigit():
+ continue
+ end_time = str(float(performer['startTime']) +
+ float(performer['duration']))
+
+ podcast_str += '
\n'
else:
# show the responses to a question
content += '
\n'
# get the maximum number of votes
- maxVotes = 1
- for questionOption in postJsonObject['object']['oneOf']:
- if not questionOption.get('name'):
+ max_votes = 1
+ for question_option in post_json_object['object']['oneOf']:
+ if not question_option.get('name'):
continue
- if not questionOption.get('replies'):
+ if not question_option.get('replies'):
continue
votes = 0
try:
- votes = int(questionOption['replies']['totalItems'])
+ votes = int(question_option['replies']['totalItems'])
except BaseException:
- pass
- if votes > maxVotes:
- maxVotes = int(votes+1)
+ print('EX: insert_question unable to convert to int')
+ if votes > max_votes:
+ max_votes = int(votes+1)
# show the votes as sliders
- questionCtr = 1
- for questionOption in postJsonObject['object']['oneOf']:
- if not questionOption.get('name'):
+ for question_option in post_json_object['object']['oneOf']:
+ if not question_option.get('name'):
continue
- if not questionOption.get('replies'):
+ if not question_option.get('replies'):
continue
votes = 0
try:
- votes = int(questionOption['replies']['totalItems'])
+ votes = int(question_option['replies']['totalItems'])
except BaseException:
- pass
- votesPercent = str(int(votes * 100 / maxVotes))
+ print('EX: insert_question unable to convert to int 2')
+ votes_percent = str(int(votes * 100 / max_votes))
+
content += \
- '
'
# does the lookup file exist?
- if not os.path.isfile(emojiLookupFilename):
- emojiForm += '
' + \
+ if not os.path.isfile(emoji_lookup_filename):
+ emoji_form += '
' + \
translate['No results'] + '
'
- emojiForm += htmlFooter()
- return emojiForm
+ emoji_form += html_footer()
+ return emoji_form
+
+ emoji_json = load_json(emoji_lookup_filename)
+ if emoji_json:
+ if os.path.isfile(custom_emoji_lookup_filename):
+ custom_emoji_json = load_json(custom_emoji_lookup_filename)
+ if custom_emoji_json:
+ emoji_json = dict(emoji_json, **custom_emoji_json)
- emojiJson = loadJson(emojiLookupFilename)
- if emojiJson:
results = {}
- for emojiName, filename in emojiJson.items():
- if searchStr in emojiName:
- results[emojiName] = filename + '.png'
- for emojiName, filename in emojiJson.items():
- if emojiName in searchStr:
- results[emojiName] = filename + '.png'
- headingShown = False
- emojiForm += '
'
- msgStr1 = translate['Copy the text then paste it into your post']
- msgStr2 = ':'
- emojiForm += '
'
+ for emoji_name, filename in emoji_json.items():
+ if search_str in emoji_name:
+ results[emoji_name] = filename + '.png'
+ for emoji_name, filename in emoji_json.items():
+ if emoji_name in search_str:
+ results[emoji_name] = filename + '.png'
- emojiForm += htmlFooter()
- return emojiForm
+ if not results:
+ emoji_form += '
' + \
+ translate['No results'] + '
'
+
+ heading_shown = False
+ emoji_form += '
'
+ msg_str1 = translate['Copy the text then paste it into your post']
+ msg_str2 = ':'
+ emoji_form += '
'
+
+ emoji_form += html_footer()
+ return emoji_form
-def _matchSharedItem(searchStrLowerList: [],
- sharedItem: {}) -> bool:
+def _match_shared_item(search_str_lower_list: [],
+ shared_item: {}) -> bool:
"""Returns true if the shared item matches search criteria
"""
- for searchSubstr in searchStrLowerList:
- searchSubstr = searchSubstr.strip()
- if sharedItem.get('location'):
- if searchSubstr in sharedItem['location'].lower():
+ for search_substr in search_str_lower_list:
+ search_substr = search_substr.strip()
+ if shared_item.get('location'):
+ if search_substr in shared_item['location'].lower():
return True
- if searchSubstr in sharedItem['summary'].lower():
+ if search_substr in shared_item['summary'].lower():
return True
- elif searchSubstr in sharedItem['displayName'].lower():
+ if search_substr in shared_item['displayName'].lower():
return True
- elif searchSubstr in sharedItem['category'].lower():
+ if search_substr in shared_item['category'].lower():
return True
return False
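
A minimal usage sketch for the matcher above; the shared item fields and search terms are hypothetical:

    # a shared item shaped like the dicts this matcher receives
    shared_item = {
        'location': 'Norfolk',
        'summary': 'Hand tools in good condition',
        'displayName': 'Garden spade',
        'category': 'tools'
    }
    # the caller supplies the query as a list of lower case substrings
    search_str_lower_list = ['spade', 'norfolk']
    # matches because 'spade' occurs in the displayName
    assert _match_shared_item(search_str_lower_list, shared_item)
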
-def _htmlSearchResultSharePage(actor: str, domainFull: str,
- callingDomain: str, pageNumber: int,
- searchStrLower: str, translate: {},
- previous: bool) -> str:
+def _html_search_result_share_page(actor: str, domain_full: str,
+ calling_domain: str, page_number: int,
+ search_str_lower: str, translate: {},
+ previous: bool) -> str:
"""Returns the html for the previous button on shared items search results
"""
- postActor = getAltPath(actor, domainFull, callingDomain)
+ post_actor = get_alt_path(actor, domain_full, calling_domain)
# previous page link, needs to be a POST
if previous:
- pageNumber -= 1
- titleStr = translate['Page up']
- imageUrl = 'pageup.png'
+ page_number -= 1
+ title_str = translate['Page up']
+ image_url = 'pageup.png'
else:
- pageNumber += 1
- titleStr = translate['Page down']
- imageUrl = 'pagedown.png'
- sharedItemsForm = \
- '
' + \
+ contrast_warning + '' + \
+ translate['Color contrast is too low'] + \
+ '
\n'
+
+ table_str = '
\n'
+ table_str += '
\n'
+ table_str += '
\n'
+ table_str += '
\n'
+ table_str += '
\n'
+ table_str += ' \n'
+
+ font_str = '
\n' + table_str
+ color_str = '
\n' + table_str
+ dimension_str = '
\n' + table_str
+ switch_str = '
\n' + table_str
+ for variable_name, value in theme_json.items():
+ if 'font-size' in variable_name:
+ variable_name_str = variable_name.replace('-', ' ')
+ variable_name_str = variable_name_str.title()
+ variable_name_label = variable_name_str
+ if contrast_warning:
+ if variable_name in ('main-bg-color', 'main-fg-color'):
+ variable_name_label = contrast_warning + variable_name_str
+ font_str += \
+ '
' + \
+ variable_name_label + '
'
+ font_str += \
+ '
\n'
+ elif ('-color' in variable_name or
+ '-background' in variable_name or
+ variable_name.endswith('-text') or
+ value.startswith('#') or
+ color_to_hex.get(value)):
+ # only use colors defined as hex
+ if not value.startswith('#'):
+ if color_to_hex.get(value):
+ value = color_to_hex[value]
+ else:
+ continue
+ variable_name_str = variable_name.replace('-', ' ')
+ if ' color' in variable_name_str:
+ variable_name_str = variable_name_str.replace(' color', '')
+ if ' bg' in variable_name_str:
+ variable_name_str = \
+ variable_name_str.replace(' bg', ' background')
+ elif ' fg' in variable_name_str:
+ variable_name_str = \
+ variable_name_str.replace(' fg', ' foreground')
+ if variable_name_str == 'cw':
+ variable_name_str = 'content warning'
+ variable_name_str = variable_name_str.title()
+ color_str += \
+ '
' + \
+ variable_name_str + '
'
+ color_str += \
+ '
\n'
+ elif (('-width' in variable_name or
+ '-height' in variable_name or
+ '-spacing' in variable_name or
+ '-margin' in variable_name or
+ '-vertical' in variable_name) and
+ (value.lower() != 'true' and value.lower() != 'false')):
+ variable_name_str = variable_name.replace('-', ' ')
+ variable_name_str = variable_name_str.title()
+ dimension_str += \
+ '
\n'
# show the posts
- itemCtr = 0
- if timelineJson:
- if 'orderedItems' not in timelineJson:
+ item_ctr = 0
+ if timeline_json:
+ if 'orderedItems' not in timeline_json:
print('ERROR: no orderedItems in timeline for '
- + boxName + ' ' + str(timelineJson))
+ + box_name + ' ' + str(timeline_json))
return ''
- useCacheOnly = False
- if boxName == 'inbox':
- useCacheOnly = True
+ use_cache_only = False
+ if box_name == 'inbox':
+ use_cache_only = True
- if timelineJson:
+ if timeline_json:
# if this is the media timeline then add an extra gallery container
- if boxName == 'tlmedia':
- if pageNumber > 1:
- tlStr += ' '
- tlStr += '
\n'
+ if box_name == 'tlmedia':
+ if page_number > 1:
+ tl_str += ' '
+ tl_str += '
\n'
# show each post in the timeline
- for item in timelineJson['orderedItems']:
+ for item in timeline_json['orderedItems']:
if item['type'] == 'Create' or \
item['type'] == 'Announce':
# is the actor who sent this post snoozed?
- if isPersonSnoozed(baseDir, nickname, domain, item['actor']):
+ if is_person_snoozed(base_dir, nickname, domain,
+ item['actor']):
continue
- if isSelfAnnounce(item):
+ if is_self_announce(item):
continue
# is the post in the memory cache of recent ones?
- currTlStr = None
- if boxName != 'tlmedia' and recentPostsCache.get('html'):
- postId = removeIdEnding(item['id']).replace('/', '#')
- if recentPostsCache['html'].get(postId):
- currTlStr = recentPostsCache['html'][postId]
- currTlStr = \
- preparePostFromHtmlCache(nickname,
- currTlStr,
- boxName,
- pageNumber)
- _logTimelineTiming(enableTimingLog,
- timelineStartTime,
- boxName, '10')
+ curr_tl_str = None
+ if box_name != 'tlmedia' and recent_posts_cache.get('html'):
+ post_id = remove_id_ending(item['id']).replace('/', '#')
+ if recent_posts_cache['html'].get(post_id):
+ curr_tl_str = recent_posts_cache['html'][post_id]
+ curr_tl_str = \
+ prepare_post_from_html_cache(nickname,
+ curr_tl_str,
+ box_name,
+ page_number)
+ _log_timeline_timing(enable_timing_log,
+ timeline_start_time,
+ box_name, '10')
- if not currTlStr:
- _logTimelineTiming(enableTimingLog,
- timelineStartTime,
- boxName, '11')
+ if not curr_tl_str:
+ _log_timeline_timing(enable_timing_log,
+ timeline_start_time,
+ box_name, '11')
+ mitm = False
+ if item.get('mitm'):
+ mitm = True
# read the post from disk
- currTlStr = \
- individualPostAsHtml(signingPrivateKeyPem,
- False, recentPostsCache,
- maxRecentPosts,
- translate, pageNumber,
- baseDir, session,
- cachedWebfingers,
- personCache,
- nickname, domain, port,
- item, None, True,
- allowDeletion,
- httpPrefix, projectVersion,
- boxName,
- YTReplacementDomain,
- twitterReplacementDomain,
- showPublishedDateOnly,
- peertubeInstances,
- allowLocalNetworkAccess,
- theme, systemLanguage,
- maxLikeCount,
- boxName != 'dm',
- showIndividualPostIcons,
- manuallyApproveFollowers,
- False, True, useCacheOnly)
- _logTimelineTiming(enableTimingLog,
- timelineStartTime, boxName, '12')
+ curr_tl_str = \
+ individual_post_as_html(signing_priv_key_pem,
+ False, recent_posts_cache,
+ max_recent_posts,
+ translate, page_number,
+ base_dir, session,
+ cached_webfingers,
+ person_cache,
+ nickname, domain, port,
+ item, None, True,
+ allow_deletion,
+ http_prefix, project_version,
+ box_name,
+ yt_replace_domain,
+ twitter_replacement_domain,
+ show_published_date_only,
+ peertube_instances,
+ allow_local_network_access,
+ theme, system_language,
+ max_like_count,
+ box_name != 'dm',
+ show_individual_post_icons,
+ manually_approve_followers,
+ False, True, use_cache_only,
+ cw_lists, lists_enabled,
+ timezone, mitm,
+ bold_reading, dogwhistles)
+ _log_timeline_timing(enable_timing_log,
+ timeline_start_time, box_name, '12')
- if currTlStr:
- if currTlStr not in tlStr:
- itemCtr += 1
- tlStr += textModeSeparator + currTlStr
- if separatorStr:
- tlStr += separatorStr
- if boxName == 'tlmedia':
- tlStr += '
\n'
+ if curr_tl_str:
+ if curr_tl_str not in tl_str:
+ item_ctr += 1
+ tl_str += text_mode_separator + curr_tl_str
+ if separator_str:
+ tl_str += separator_str
+ if box_name == 'tlmedia':
+ tl_str += '
\n'
- if itemCtr < 3:
- print('Items added to html timeline ' + boxName + ': ' +
- str(itemCtr) + ' ' + str(timelineJson['orderedItems']))
+ if item_ctr < 3:
+ print('Items added to html timeline ' + box_name + ': ' +
+ str(item_ctr) + ' ' + str(timeline_json['orderedItems']))
# page down arrow
- if itemCtr > 0:
- tlStr += textModeSeparator
- tlStr += \
+ if item_ctr > 0:
+ tl_str += text_mode_separator
+ tl_str += \
+ ' \n' + \
'
\n'
- profileForm += htmlFooter()
- return profileForm
+ profile_form += '\n'
+ profile_form += html_footer()
+ return profile_form
diff --git a/webfinger.py b/webfinger.py
index 3ca4f00e5..4f97ff4bb 100644
--- a/webfinger.py
+++ b/webfinger.py
@@ -1,7 +1,7 @@
__filename__ = "webfinger.py"
__author__ = "Bob Mottram"
__license__ = "AGPL3+"
-__version__ = "1.2.0"
+__version__ = "1.3.0"
__maintainer__ = "Bob Mottram"
__email__ = "bob@libreserver.org"
__status__ = "Production"
@@ -9,185 +9,222 @@ __module_group__ = "ActivityPub"
import os
import urllib.parse
-from session import getJson
-from cache import storeWebfingerInCache
-from cache import getWebfingerFromCache
-from utils import getFullDomain
-from utils import loadJson
-from utils import loadJsonOnionify
-from utils import saveJson
-from utils import getProtocolPrefixes
-from utils import removeDomainPort
-from utils import getUserPaths
-from utils import getGroupPaths
-from utils import localActorUrl
+from session import get_json
+from cache import store_webfinger_in_cache
+from cache import get_webfinger_from_cache
+from utils import get_attachment_property_value
+from utils import get_full_domain
+from utils import load_json
+from utils import load_json_onionify
+from utils import save_json
+from utils import get_protocol_prefixes
+from utils import remove_domain_port
+from utils import get_user_paths
+from utils import get_group_paths
+from utils import local_actor_url
-def _parseHandle(handle: str) -> (str, str, bool):
+def _parse_handle(handle: str) -> (str, str, bool):
"""Parses a handle and returns nickname and domain
"""
- groupAccount = False
+ group_account = False
if '.' not in handle:
return None, None, False
- prefixes = getProtocolPrefixes()
- handleStr = handle
+ prefixes = get_protocol_prefixes()
+ handle_str = handle
for prefix in prefixes:
- handleStr = handleStr.replace(prefix, '')
+ handle_str = handle_str.replace(prefix, '')
# try domain/@nick
if '/@' in handle:
- domain, nickname = handleStr.split('/@')
+ domain, nickname = handle_str.split('/@')
return nickname, domain, False
# try nick@domain
if '@' in handle:
if handle.startswith('!'):
handle = handle[1:]
- groupAccount = True
+ group_account = True
nickname, domain = handle.split('@')
- return nickname, domain, groupAccount
+ return nickname, domain, group_account
# try for different /users/ paths
- usersPaths = getUserPaths()
- groupPaths = getGroupPaths()
- for possibleUsersPath in usersPaths:
- if possibleUsersPath in handle:
- if possibleUsersPath in groupPaths:
- groupAccount = True
- domain, nickname = handleStr.split(possibleUsersPath)
- return nickname, domain, groupAccount
+ users_paths = get_user_paths()
+ group_paths = get_group_paths()
+ for possible_users_path in users_paths:
+ if possible_users_path in handle:
+ if possible_users_path in group_paths:
+ group_account = True
+ domain, nickname = handle_str.split(possible_users_path)
+ return nickname, domain, group_account
return None, None, False
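
A sketch of the handle forms the parser accepts, using hypothetical accounts; the last case assumes '/users/' is among the paths returned by get_user_paths():

    # nick@domain
    assert _parse_handle('alice@example.com') == \
        ('alice', 'example.com', False)
    # domain/@nick
    assert _parse_handle('https://example.com/@alice') == \
        ('alice', 'example.com', False)
    # a leading ! marks a group account
    assert _parse_handle('!gardening@example.com') == \
        ('gardening', 'example.com', True)
    # /users/ path form
    assert _parse_handle('https://example.com/users/alice') == \
        ('alice', 'example.com', False)
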
-def webfingerHandle(session, handle: str, httpPrefix: str,
- cachedWebfingers: {},
- fromDomain: str, projectVersion: str,
- debug: bool, groupAccount: bool,
- signingPrivateKeyPem: str) -> {}:
+def webfinger_handle(session, handle: str, http_prefix: str,
+ cached_webfingers: {},
+ from_domain: str, project_version: str,
+ debug: bool, group_account: bool,
+ signing_priv_key_pem: str) -> {}:
"""Gets webfinger result for the given ActivityPub handle
+ NOTE: in earlier implementations group_account modified the acct prefix.
+    The parameter has been left in because there is currently no consensus
+    about how groups should be implemented.
"""
if not session:
- if debug:
- print('WARN: No session specified for webfingerHandle')
+ print('WARN: No session specified for webfinger_handle')
return None
- nickname, domain, grpAccount = _parseHandle(handle)
+ nickname, domain, _ = _parse_handle(handle)
if not nickname:
+ print('WARN: No nickname found in handle ' + handle)
return None
- if grpAccount:
- groupAccount = True
- wfDomain = removeDomainPort(domain)
+ wf_domain = remove_domain_port(domain)
- wfHandle = nickname + '@' + wfDomain
- wf = getWebfingerFromCache(wfHandle, cachedWebfingers)
- if wf:
+ wf_handle = nickname + '@' + wf_domain
+ if debug:
+ print('Parsed webfinger handle: ' + handle + ' -> ' + wf_handle)
+ wfg = get_webfinger_from_cache(wf_handle, cached_webfingers)
+ if wfg:
if debug:
- print('Webfinger from cache: ' + str(wf))
- return wf
- url = '{}://{}/.well-known/webfinger'.format(httpPrefix, domain)
+ print('Webfinger from cache: ' + str(wfg))
+ return wfg
+ url = '{}://{}/.well-known/webfinger'.format(http_prefix, domain)
hdr = {
'Accept': 'application/jrd+json'
}
- if not groupAccount:
- par = {
- 'resource': 'acct:{}'.format(wfHandle)
- }
- else:
- par = {
- 'resource': 'group:{}'.format(wfHandle)
- }
+ par = {
+ 'resource': 'acct:{}'.format(wf_handle)
+ }
try:
result = \
- getJson(signingPrivateKeyPem, session, url, hdr, par,
- debug, projectVersion, httpPrefix, fromDomain)
- except Exception as e:
- print('ERROR: webfingerHandle ' + str(e))
+ get_json(signing_priv_key_pem, session, url, hdr, par,
+ debug, project_version, http_prefix, from_domain)
+ except Exception as ex:
+ print('ERROR: webfinger_handle ' + wf_handle + ' ' + str(ex))
return None
+ # if the first attempt fails then try specifying the webfinger
+ # resource in a different way
+ if not result:
+ resource = handle
+ if handle == wf_handle:
+ # reconstruct the actor
+ resource = http_prefix + '://' + wf_domain + '/users/' + nickname
+ # try again using the actor as the resource
+ # See https://datatracker.ietf.org/doc/html/rfc7033 section 4.5
+ par = {
+ 'resource': '{}'.format(resource)
+ }
+ try:
+ result = \
+ get_json(signing_priv_key_pem, session, url, hdr, par,
+ debug, project_version, http_prefix, from_domain)
+ except Exception as ex:
+ print('ERROR: webfinger_handle ' + wf_handle + ' ' + str(ex))
+ return None
+
if result:
- storeWebfingerInCache(wfHandle, result, cachedWebfingers)
+ store_webfinger_in_cache(wf_handle, result, cached_webfingers)
else:
- if debug:
- print("WARN: Unable to webfinger " + url + ' ' +
- 'nickname: ' + str(nickname) + ' ' +
- 'domain: ' + str(wfDomain) + ' ' +
- 'headers: ' + str(hdr) + ' ' +
- 'params: ' + str(par))
+ print("WARN: Unable to webfinger " + str(url) + ' ' +
+ 'from_domain: ' + str(from_domain) + ' ' +
+ 'nickname: ' + str(nickname) + ' ' +
+ 'handle: ' + str(handle) + ' ' +
+ 'wf_handle: ' + str(wf_handle) + ' ' +
+ 'domain: ' + str(wf_domain) + ' ' +
+ 'headers: ' + str(hdr) + ' ' +
+ 'params: ' + str(par))
return result
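
Concretely, the two resource forms tried above produce queries like these (alice@example.com is a placeholder account):

    from urllib.parse import urlencode

    url = 'https://example.com/.well-known/webfinger'
    # first attempt: the acct: URI form
    print(url + '?' + urlencode({'resource': 'acct:alice@example.com'}))
    # fallback: the actor URL itself as the resource (RFC 7033 section 4.5)
    actor = 'https://example.com/users/alice'
    print(url + '?' + urlencode({'resource': actor}))
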
-def storeWebfingerEndpoint(nickname: str, domain: str, port: int,
- baseDir: str, wfJson: {}) -> bool:
+def store_webfinger_endpoint(nickname: str, domain: str, port: int,
+ base_dir: str, wf_json: {}) -> bool:
"""Stores webfinger endpoint for a user to a file
"""
- originalDomain = domain
- domain = getFullDomain(domain, port)
+ original_domain = domain
+ domain = get_full_domain(domain, port)
handle = nickname + '@' + domain
- wfSubdir = '/wfendpoints'
- if not os.path.isdir(baseDir + wfSubdir):
- os.mkdir(baseDir + wfSubdir)
- filename = baseDir + wfSubdir + '/' + handle + '.json'
- saveJson(wfJson, filename)
+ wf_subdir = '/wfendpoints'
+ if not os.path.isdir(base_dir + wf_subdir):
+ os.mkdir(base_dir + wf_subdir)
+ filename = base_dir + wf_subdir + '/' + handle + '.json'
+ save_json(wf_json, filename)
if nickname == 'inbox':
- handle = originalDomain + '@' + domain
- filename = baseDir + wfSubdir + '/' + handle + '.json'
- saveJson(wfJson, filename)
+ handle = original_domain + '@' + domain
+ filename = base_dir + wf_subdir + '/' + handle + '.json'
+ save_json(wf_json, filename)
return True
-def createWebfingerEndpoint(nickname: str, domain: str, port: int,
- httpPrefix: str, publicKeyPem: str,
- groupAccount: bool) -> {}:
+def create_webfinger_endpoint(nickname: str, domain: str, port: int,
+ http_prefix: str, public_key_pem: str,
+ group_account: bool) -> {}:
"""Creates a webfinger endpoint for a user
+ NOTE: in earlier implementations group_account modified the acct prefix.
+    The parameter has been left in because there is currently no consensus
+    about how groups should be implemented.
"""
- originalDomain = domain
- domain = getFullDomain(domain, port)
+ original_domain = domain
+ domain = get_full_domain(domain, port)
- personName = nickname
- personId = localActorUrl(httpPrefix, personName, domain)
- if not groupAccount:
- subjectStr = "acct:" + personName + "@" + originalDomain
- else:
- subjectStr = "group:" + personName + "@" + originalDomain
- profilePageHref = httpPrefix + "://" + domain + "/@" + nickname
- if nickname == 'inbox' or nickname == originalDomain:
- personName = 'actor'
- personId = httpPrefix + "://" + domain + "/" + personName
- subjectStr = "acct:" + originalDomain + "@" + originalDomain
- profilePageHref = httpPrefix + '://' + domain + \
+ person_name = nickname
+ person_id = local_actor_url(http_prefix, person_name, domain)
+ subject_str = "acct:" + person_name + "@" + original_domain
+ profile_page_href = http_prefix + "://" + domain + "/@" + nickname
+ if nickname in ('inbox', original_domain):
+ person_name = 'actor'
+ person_id = http_prefix + "://" + domain + "/" + person_name
+ subject_str = "acct:" + original_domain + "@" + original_domain
+ profile_page_href = http_prefix + '://' + domain + \
'/about/more?instance_actor=true'
+ person_link = http_prefix + "://" + domain + "/@" + person_name
+ blog_url = http_prefix + "://" + domain + "/blog/" + person_name
account = {
"aliases": [
- httpPrefix + "://" + domain + "/@" + personName,
- personId
+ person_link,
+ person_id
],
"links": [
{
- "href": profilePageHref,
+ "href": person_link + "/avatar.png",
+ "rel": "http://webfinger.net/rel/avatar",
+ "type": "image/png"
+ },
+ {
+ "href": blog_url,
+ "rel": "http://webfinger.net/rel/blog"
+ },
+ {
+ "href": profile_page_href,
"rel": "http://webfinger.net/rel/profile-page",
"type": "text/html"
},
{
- "href": personId,
+ "href": profile_page_href,
+ "rel": "http://webfinger.net/rel/profile-page",
+ "type": "text/vcard"
+ },
+ {
+ "href": person_id,
"rel": "self",
"type": "application/activity+json"
}
],
- "subject": subjectStr
+ "subject": subject_str
}
return account
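
For a hypothetical account this yields a JRD document along these lines (the public key argument is passed as an empty placeholder here):

    wf_json = create_webfinger_endpoint('alice', 'example.com', 443,
                                        'https', '', False)
    # wf_json['subject'] == 'acct:alice@example.com'
    # wf_json['aliases'] == ['https://example.com/@alice',
    #                        'https://example.com/users/alice']
    # wf_json['links'] contains the avatar, blog, profile-page, vcard
    # and self (application/activity+json) entries built above
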
-def webfingerNodeInfo(httpPrefix: str, domainFull: str) -> {}:
+def webfinger_node_info(http_prefix: str, domain_full: str) -> {}:
""" /.well-known/nodeinfo endpoint
"""
nodeinfo = {
'links': [
{
- 'href': httpPrefix + '://' + domainFull + '/nodeinfo/2.0',
+ 'href': http_prefix + '://' + domain_full + '/nodeinfo/2.0',
'rel': 'http://nodeinfo.diaspora.software/ns/schema/2.0'
}
]
@@ -195,197 +232,299 @@ def webfingerNodeInfo(httpPrefix: str, domainFull: str) -> {}:
return nodeinfo
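
The discovery document a client fetches from /.well-known/nodeinfo then looks like this for a hypothetical domain:

    print(webfinger_node_info('https', 'example.com'))
    # {'links': [{'href': 'https://example.com/nodeinfo/2.0',
    #             'rel': 'http://nodeinfo.diaspora.software/ns/schema/2.0'}]}
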
-def webfingerMeta(httpPrefix: str, domainFull: str) -> str:
+def webfinger_meta(http_prefix: str, domain_full: str) -> str:
"""Return /.well-known/host-meta
"""
- metaStr = \
+ meta_str = \
"" + \
"" + \
"" + \
- "" + domainFull + "" + \
+ "" + domain_full + "" + \
"" + \
"" + \
" Resource Descriptor" + \
" " + \
""
- return metaStr
+ return meta_str
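
For reference, a host-meta document of the standard RFC 6415 shape that this endpoint serves, with hypothetical domain values; the exact markup Epicyon emits may differ slightly:

    # standard host-meta shape per RFC 6415; domain values are hypothetical
    host_meta_example = (
        "<?xml version='1.0' encoding='UTF-8'?>"
        "<XRD xmlns='http://docs.oasis-open.org/ns/xri/xrd-1.0'>"
        "<hm:Host xmlns:hm='http://host-meta.net/xrd/1.0'>example.com</hm:Host>"
        "<Link rel='lrdd' type='application/xrd+xml' "
        "template='https://example.com/.well-known/webfinger?resource={uri}'>"
        "<Title>Resource Descriptor</Title></Link></XRD>"
    )
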
-def webfingerLookup(path: str, baseDir: str,
- domain: str, onionDomain: str,
- port: int, debug: bool) -> {}:
+def webfinger_lookup(path: str, base_dir: str,
+ domain: str, onion_domain: str, i2p_domain: str,
+ port: int, debug: bool) -> {}:
"""Lookup the webfinger endpoint for an account
"""
if not path.startswith('/.well-known/webfinger?'):
return None
handle = None
- resourceTypes = ('acct', 'group')
- for resType in resourceTypes:
- if 'resource=' + resType + ':' in path:
- handle = path.split('resource=' + resType + ':')[1].strip()
- handle = urllib.parse.unquote(handle)
- if debug:
- print('DEBUG: WEBFINGER handle ' + handle)
- break
- elif 'resource=' + resType + '%3A' in path:
- handle = path.split('resource=' + resType + '%3A')[1]
- handle = urllib.parse.unquote(handle.strip())
- if debug:
- print('DEBUG: WEBFINGER handle ' + handle)
- break
+ res_type = 'acct'
+ if 'resource=' + res_type + ':' in path:
+ handle = path.split('resource=' + res_type + ':')[1].strip()
+ handle = urllib.parse.unquote(handle)
+ if debug:
+ print('DEBUG: WEBFINGER handle ' + handle)
+ elif 'resource=' + res_type + '%3A' in path:
+ handle = path.split('resource=' + res_type + '%3A')[1]
+ handle = urllib.parse.unquote(handle.strip())
+ if debug:
+ print('DEBUG: WEBFINGER handle ' + handle)
if not handle:
if debug:
print('DEBUG: WEBFINGER handle missing')
return None
if '&' in handle:
handle = handle.split('&')[0].strip()
- if debug:
- print('DEBUG: WEBFINGER handle with & removed ' + handle)
+ print('DEBUG: WEBFINGER handle with & removed ' + handle)
if '@' not in handle:
if debug:
print('DEBUG: WEBFINGER no @ in handle ' + handle)
return None
- handle = getFullDomain(handle, port)
+ handle = get_full_domain(handle, port)
# convert @domain@domain to inbox@domain
if '@' in handle:
- handleDomain = handle.split('@')[1]
- if handle.startswith(handleDomain + '@'):
- handle = 'inbox@' + handleDomain
+ handle_domain = handle.split('@')[1]
+ if handle.startswith(handle_domain + '@'):
+ handle = 'inbox@' + handle_domain
# if this is a lookup for a handle using its onion domain
# then swap the onion domain for the clearnet version
onionify = False
- if onionDomain:
- if onionDomain in handle:
- handle = handle.replace(onionDomain, domain)
+ if onion_domain:
+ if onion_domain in handle:
+ handle = handle.replace(onion_domain, domain)
onionify = True
+ i2pify = False
+ if i2p_domain:
+ if i2p_domain in handle:
+ handle = handle.replace(i2p_domain, domain)
+ i2pify = True
# instance actor
if handle.startswith('actor@'):
handle = handle.replace('actor@', 'inbox@', 1)
elif handle.startswith('Actor@'):
handle = handle.replace('Actor@', 'inbox@', 1)
- filename = baseDir + '/wfendpoints/' + handle + '.json'
+ filename = base_dir + '/wfendpoints/' + handle + '.json'
if debug:
print('DEBUG: WEBFINGER filename ' + filename)
if not os.path.isfile(filename):
if debug:
print('DEBUG: WEBFINGER filename not found ' + filename)
return None
- if not onionify:
- wfJson = loadJson(filename)
- else:
+ if not onionify and not i2pify:
+ wf_json = load_json(filename)
+ elif onionify:
print('Webfinger request for onionified ' + handle)
- wfJson = loadJsonOnionify(filename, domain, onionDomain)
- if not wfJson:
- wfJson = {"nickname": "unknown"}
- return wfJson
+ wf_json = load_json_onionify(filename, domain, onion_domain)
+ else:
+ print('Webfinger request for i2pified ' + handle)
+ wf_json = load_json_onionify(filename, domain, i2p_domain)
+ if not wf_json:
+ wf_json = {"nickname": "unknown"}
+ return wf_json
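
A sketch of the query parsing above, with a hypothetical URL-encoded request path:

    import urllib.parse

    path = '/.well-known/webfinger?resource=acct%3Aalice%40example.com'
    handle = path.split('resource=acct%3A')[1]
    handle = urllib.parse.unquote(handle.strip())
    print(handle)
    # alice@example.com, which is then resolved against
    # base_dir + '/wfendpoints/alice@example.com.json'
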
-def _webfingerUpdateFromProfile(wfJson: {}, actorJson: {}) -> bool:
+def _webfinger_update_avatar(wf_json: {}, actor_json: {}) -> bool:
+ """Updates the avatar image link
+ """
+ found = False
+ avatar_url = actor_json['icon']['url']
+ media_type = actor_json['icon']['mediaType']
+ for link in wf_json['links']:
+ if not link.get('rel'):
+ continue
+ if not link['rel'].endswith('://webfinger.net/rel/avatar'):
+ continue
+ found = True
+ if link['href'] != avatar_url or link['type'] != media_type:
+ link['href'] = avatar_url
+ link['type'] = media_type
+ return True
+ break
+ if found:
+ return False
+ wf_json['links'].append({
+ "href": avatar_url,
+ "rel": "http://webfinger.net/rel/avatar",
+ "type": media_type
+ })
+ return True
+
+
+def _webfinger_update_vcard(wf_json: {}, actor_json: {}) -> bool:
+ """Updates the vcard link
+ """
+ for link in wf_json['links']:
+ if link.get('type'):
+ if link['type'] == 'text/vcard':
+ return False
+ wf_json['links'].append({
+ "href": actor_json['url'],
+ "rel": "http://webfinger.net/rel/profile-page",
+ "type": "text/vcard"
+ })
+ return True
+
+
+def _webfinger_add_blog_link(wf_json: {}, actor_json: {}) -> bool:
+ """Adds a blog link to webfinger if needed
+ """
+ found = False
+ if '/users/' in actor_json['id']:
+ blog_url = \
+ actor_json['id'].split('/users/')[0] + '/blog/' + \
+ actor_json['id'].split('/users/')[1]
+ else:
+ blog_url = \
+ actor_json['id'].split('/@')[0] + '/blog/' + \
+ actor_json['id'].split('/@')[1]
+ for link in wf_json['links']:
+ if not link.get('rel'):
+ continue
+ if not link['rel'].endswith('://webfinger.net/rel/blog'):
+ continue
+ found = True
+ if link['href'] != blog_url:
+ link['href'] = blog_url
+ return True
+ break
+ if found:
+ return False
+ wf_json['links'].append({
+ "href": blog_url,
+ "rel": "http://webfinger.net/rel/blog"
+ })
+ return True
+
+
+def _webfinger_update_from_profile(wf_json: {}, actor_json: {}) -> bool:
    """Updates webfinger email/blog/xmpp links from the actor profile
    Returns true if one or more tags have been changed
"""
- if not actorJson.get('attachment'):
+ if not actor_json.get('attachment'):
return False
changed = False
- webfingerPropertyName = {
+ webfinger_property_name = {
"xmpp": "xmpp",
"matrix": "matrix",
"email": "mailto",
"ssb": "ssb",
"briar": "briar",
"cwtch": "cwtch",
- "jami": "jami",
"tox": "toxId"
}
- aliasesNotFound = []
- for name, alias in webfingerPropertyName.items():
- aliasesNotFound.append(alias)
+ aliases_not_found = []
+ for name, alias in webfinger_property_name.items():
+ aliases_not_found.append(alias)
- for propertyValue in actorJson['attachment']:
- if not propertyValue.get('name'):
+ for property_value in actor_json['attachment']:
+ name_value = None
+ if property_value.get('name'):
+ name_value = property_value['name']
+ elif property_value.get('schema:name'):
+ name_value = property_value['schema:name']
+ if not name_value:
continue
- propertyName = propertyValue['name'].lower()
+ property_name = name_value.lower()
found = False
- for name, alias in webfingerPropertyName.items():
- if name == propertyName:
- if alias in aliasesNotFound:
- aliasesNotFound.remove(alias)
+ for name, alias in webfinger_property_name.items():
+ if name == property_name:
+ if alias in aliases_not_found:
+ aliases_not_found.remove(alias)
found = True
break
if not found:
continue
- if not propertyValue.get('type'):
+ if not property_value.get('type'):
continue
- if not propertyValue.get('value'):
+ prop_value_name, _ = \
+ get_attachment_property_value(property_value)
+ if not prop_value_name:
continue
- if propertyValue['type'] != 'PropertyValue':
+ if not property_value['type'].endswith('PropertyValue'):
continue
- newValue = propertyValue['value'].strip()
- if '://' in newValue:
- newValue = newValue.split('://')[1]
+ new_value = property_value[prop_value_name].strip()
+ if '://' in new_value:
+ new_value = new_value.split('://')[1]
- aliasIndex = 0
+ alias_index = 0
found = False
- for alias in wfJson['aliases']:
- if alias.startswith(webfingerPropertyName[propertyName] + ':'):
+ for alias in wf_json['aliases']:
+ if alias.startswith(webfinger_property_name[property_name] + ':'):
found = True
break
- aliasIndex += 1
- newAlias = webfingerPropertyName[propertyName] + ':' + newValue
+ alias_index += 1
+ new_alias = webfinger_property_name[property_name] + ':' + new_value
if found:
- if wfJson['aliases'][aliasIndex] != newAlias:
+ if wf_json['aliases'][alias_index] != new_alias:
changed = True
- wfJson['aliases'][aliasIndex] = newAlias
+ wf_json['aliases'][alias_index] = new_alias
else:
- wfJson['aliases'].append(newAlias)
+ wf_json['aliases'].append(new_alias)
changed = True
# remove any aliases which are no longer in the actor profile
- removeAlias = []
- for alias in aliasesNotFound:
- for fullAlias in wfJson['aliases']:
- if fullAlias.startswith(alias + ':'):
- removeAlias.append(fullAlias)
- for fullAlias in removeAlias:
- wfJson['aliases'].remove(fullAlias)
+ remove_alias = []
+ for alias in aliases_not_found:
+ for full_alias in wf_json['aliases']:
+ if full_alias.startswith(alias + ':'):
+ remove_alias.append(full_alias)
+ for full_alias in remove_alias:
+ wf_json['aliases'].remove(full_alias)
+ changed = True
+
+ if _webfinger_update_avatar(wf_json, actor_json):
+ changed = True
+
+ if _webfinger_update_vcard(wf_json, actor_json):
+ changed = True
+
+ if _webfinger_add_blog_link(wf_json, actor_json):
changed = True
return changed
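
A minimal sketch of the rewriting performed by _webfinger_update_from_profile, with a hypothetical actor; it assumes get_attachment_property_value returns ('value', ...) for a plain PropertyValue attachment:

    actor_json = {
        'id': 'https://example.com/users/alice',
        'url': 'https://example.com/@alice',
        'icon': {'url': 'https://example.com/avatar.png',
                 'mediaType': 'image/png'},
        'attachment': [{'name': 'XMPP',
                        'type': 'PropertyValue',
                        'value': 'alice@chat.example.com'}]
    }
    wf_json = {'aliases': [], 'links': []}
    if _webfinger_update_from_profile(wf_json, actor_json):
        print(wf_json['aliases'])   # ['xmpp:alice@chat.example.com']
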
-def webfingerUpdate(baseDir: str, nickname: str, domain: str,
- onionDomain: str,
- cachedWebfingers: {}) -> None:
+def webfinger_update(base_dir: str, nickname: str, domain: str,
+ onion_domain: str, i2p_domain: str,
+ cached_webfingers: {}) -> None:
+ """Regenerates stored webfinger
+ """
handle = nickname + '@' + domain
- wfSubdir = '/wfendpoints'
- if not os.path.isdir(baseDir + wfSubdir):
+ wf_subdir = '/wfendpoints'
+ if not os.path.isdir(base_dir + wf_subdir):
return
- filename = baseDir + wfSubdir + '/' + handle + '.json'
+ filename = base_dir + wf_subdir + '/' + handle + '.json'
onionify = False
- if onionDomain:
- if onionDomain in handle:
- handle = handle.replace(onionDomain, domain)
+ i2pify = False
+ if onion_domain:
+ if onion_domain in handle:
+ handle = handle.replace(onion_domain, domain)
onionify = True
+ elif i2p_domain:
+ if i2p_domain in handle:
+ handle = handle.replace(i2p_domain, domain)
+ i2pify = True
if not onionify:
- wfJson = loadJson(filename)
+ if not i2pify:
+ wf_json = load_json(filename)
+ else:
+ wf_json = load_json_onionify(filename, domain, i2p_domain)
else:
- wfJson = loadJsonOnionify(filename, domain, onionDomain)
- if not wfJson:
+ wf_json = load_json_onionify(filename, domain, onion_domain)
+ if not wf_json:
return
- actorFilename = baseDir + '/accounts/' + handle + '.json'
- actorJson = loadJson(actorFilename)
- if not actorJson:
+ actor_filename = base_dir + '/accounts/' + handle + '.json'
+ actor_json = load_json(actor_filename)
+ if not actor_json:
return
- if _webfingerUpdateFromProfile(wfJson, actorJson):
- if saveJson(wfJson, filename):
- storeWebfingerInCache(handle, wfJson, cachedWebfingers)
+    if _webfinger_update_from_profile(wf_json, actor_json):
+ if save_json(wf_json, filename):
+ store_webfinger_in_cache(handle, wf_json, cached_webfingers)
diff --git a/website/EN/index.html b/website/EN/index.html
index a30abfcdd..58ae522eb 100644
--- a/website/EN/index.html
+++ b/website/EN/index.html
@@ -11,678 +11,682 @@
"author" : {
"@type" : "Person",
"name" : "Bob Mottram",
- "email": "bob@libreserver.org",
- "url": "https://epicyon.libreserver.org/users/bob"
+ "email": "bob@libreserver.org",
+ "url": "https://epicyon.libreserver.org/users/bob"
},
- "applicationCategory" : ["server", "software", "bash", "debian", "linux", "self-hosting"],
+ "applicationCategory" : ["server", "software", "bash", "debian", "linux", "self-hosting", "raspberry-pi"],
"downloadUrl" : "https://libreserver.org/epicyon/epicyon.tar.gz"
}
+
+      Epicyon ActivityPub server release version 1.3.0
+
Epicyon release version 1.3.0
+
"Norfolk Terrier"
+
3rd Feb 2022
+
+
Over the last year Epicyon has seen some major improvements, such as keyboard navigation, much faster cryptography and federated shared items. It has been an unusually busy year for the project, and I expect future development to become less hectic and more incremental.
+
Like a Norfolk Terrier, the fediverse continues to be a livelier and friendlier companion than the smog monster that the big commercial social networks have turned into. Even if it doesn't take over the world or isn't to everyone's taste, being able to run your own small social network site the way you want, on low-cost hardware, is a victory.
+
Epicyon is not intended to compete with Mastodon, and there are no public Epicyon instances to try. It is intended as a self-hosted system for instances with ten user accounts or fewer. Don't scale up; scale out horizontally, like the rhizome.
+
Some of the changes since the previous version are:
+
+
Switched from pycryptodome to python3-cryptography, with a 20X speed increase
Epicyon can be downloaded as a gzipped file, or you can get the latest version from the git repo. For installation instructions see the main page. To upgrade an existing instance, make sure that you have the python3-cryptography package installed, then do a git pull, chown, and restart the daemon. Upgrades to web systems do not need to be a huge drama.
+
+
+
diff --git a/xmpp.py b/xmpp.py
index 9fc124c8d..1cdc57510 100644
--- a/xmpp.py
+++ b/xmpp.py
@@ -1,89 +1,112 @@
__filename__ = "xmpp.py"
__author__ = "Bob Mottram"
__license__ = "AGPL3+"
-__version__ = "1.2.0"
+__version__ = "1.3.0"
__maintainer__ = "Bob Mottram"
__email__ = "bob@libreserver.org"
__status__ = "Production"
__module_group__ = "Profile Metadata"
-def getXmppAddress(actorJson: {}) -> str:
+from utils import get_attachment_property_value
+
+
+def get_xmpp_address(actor_json: {}) -> str:
"""Returns xmpp address for the given actor
"""
- if not actorJson.get('attachment'):
+ if not actor_json.get('attachment'):
return ''
- for propertyValue in actorJson['attachment']:
- if not propertyValue.get('name'):
+ for property_value in actor_json['attachment']:
+ name_value = None
+ if property_value.get('name'):
+ name_value = property_value['name'].lower()
+ elif property_value.get('schema:name'):
+ name_value = property_value['schema:name'].lower()
+ if not name_value:
continue
- nameLower = propertyValue['name'].lower()
- if not (nameLower.startswith('xmpp') or
- nameLower.startswith('jabber')):
+ if not (name_value.startswith('xmpp') or
+ name_value.startswith('jabber')):
continue
- if not propertyValue.get('type'):
+ if not property_value.get('type'):
continue
- if not propertyValue.get('value'):
+ prop_value_name, _ = \
+ get_attachment_property_value(property_value)
+ if not prop_value_name:
continue
- if propertyValue['type'] != 'PropertyValue':
+ if not property_value['type'].endswith('PropertyValue'):
continue
- if '@' not in propertyValue['value']:
+ if '@' not in property_value[prop_value_name]:
continue
- if '"' in propertyValue['value']:
+ if '"' in property_value[prop_value_name]:
continue
- return propertyValue['value']
+ return property_value[prop_value_name]
return ''
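
A usage sketch with a hypothetical profile attachment (again assuming get_attachment_property_value resolves the plain 'value' field):

    actor_json = {
        'attachment': [{'name': 'Jabber',
                        'type': 'PropertyValue',
                        'value': 'alice@chat.example.com'}]
    }
    print(get_xmpp_address(actor_json))   # alice@chat.example.com
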
-def setXmppAddress(actorJson: {}, xmppAddress: str) -> None:
+def set_xmpp_address(actor_json: {}, xmpp_address: str) -> None:
"""Sets an xmpp address for the given actor
"""
- notXmppAddress = False
- if '@' not in xmppAddress:
- notXmppAddress = True
- if '.' not in xmppAddress:
- notXmppAddress = True
- if '"' in xmppAddress:
- notXmppAddress = True
- if '<' in xmppAddress:
- notXmppAddress = True
+ not_xmpp_address = False
+ if '@' not in xmpp_address:
+ not_xmpp_address = True
+ if '.' not in xmpp_address:
+ not_xmpp_address = True
+ if '"' in xmpp_address:
+ not_xmpp_address = True
+ if '<' in xmpp_address:
+ not_xmpp_address = True
- if not actorJson.get('attachment'):
- actorJson['attachment'] = []
+ if not actor_json.get('attachment'):
+ actor_json['attachment'] = []
# remove any existing value
- propertyFound = None
- for propertyValue in actorJson['attachment']:
- if not propertyValue.get('name'):
+ property_found = None
+ for property_value in actor_json['attachment']:
+ name_value = None
+ if property_value.get('name'):
+ name_value = property_value['name']
+ elif property_value.get('schema:name'):
+ name_value = property_value['schema:name']
+ if not name_value:
continue
- if not propertyValue.get('type'):
+ if not property_value.get('type'):
continue
- if not (propertyValue['name'].lower().startswith('xmpp') or
- propertyValue['name'].lower().startswith('jabber')):
+ if not (name_value.lower().startswith('xmpp') or
+ name_value.lower().startswith('jabber')):
continue
- propertyFound = propertyValue
+ property_found = property_value
break
- if propertyFound:
- actorJson['attachment'].remove(propertyFound)
- if notXmppAddress:
+ if property_found:
+ actor_json['attachment'].remove(property_found)
+ if not_xmpp_address:
return
- for propertyValue in actorJson['attachment']:
- if not propertyValue.get('name'):
+ for property_value in actor_json['attachment']:
+ name_value = None
+ if property_value.get('name'):
+ name_value = property_value['name']
+ elif property_value.get('schema:name'):
+ name_value = property_value['schema:name']
+ if not name_value:
continue
- if not propertyValue.get('type'):
+ if not property_value.get('type'):
continue
- nameLower = propertyValue['name'].lower()
- if not (nameLower.startswith('xmpp') or
- nameLower.startswith('jabber')):
+ name_value = name_value.lower()
+ if not (name_value.startswith('xmpp') or
+ name_value.startswith('jabber')):
continue
- if propertyValue['type'] != 'PropertyValue':
+ if not property_value['type'].endswith('PropertyValue'):
continue
- propertyValue['value'] = xmppAddress
+ prop_value_name, _ = \
+ get_attachment_property_value(property_value)
+ if not prop_value_name:
+ continue
+ property_value[prop_value_name] = xmpp_address
return
- newXmppAddress = {
+ new_xmpp_address = {
"name": "XMPP",
"type": "PropertyValue",
- "value": xmppAddress
+ "value": xmpp_address
}
- actorJson['attachment'].append(newXmppAddress)
+ actor_json['attachment'].append(new_xmpp_address)
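
A round-trip sketch of the setter and getter, with hypothetical values:

    actor_json = {'attachment': []}
    set_xmpp_address(actor_json, 'alice@chat.example.com')
    assert get_xmpp_address(actor_json) == 'alice@chat.example.com'
    # malformed addresses remove any existing entry and set nothing
    set_xmpp_address(actor_json, 'not-an-address')
    assert get_xmpp_address(actor_json) == ''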