Snake case

merge-requests/30/head
Bob Mottram 2021-12-27 15:43:22 +00:00
parent e2b7407e96
commit 501feef616
21 changed files with 96 additions and 96 deletions
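The commit is a mechanical rename of removeHtml to remove_html (and removeHtmlTag to remove_htmlTag) across the files shown below. As an illustrative aside only, a stdlib-only check such as the following could confirm that no camelCase callers remain after a rename like this; it is not part of the commit and the helper name is hypothetical.

# Illustrative helper, not part of this commit: walk a working tree and
# report any Python lines still referring to the old camelCase name.
import os


def find_remaining_callers(root: str, old_name: str = 'removeHtml') -> list:
    """Return (path, line_number, line) tuples that still use old_name."""
    hits = []
    for dirpath, _, filenames in os.walk(root):
        for filename in filenames:
            if not filename.endswith('.py'):
                continue
            path = os.path.join(dirpath, filename)
            with open(path, 'r', encoding='utf-8') as handle:
                for number, line in enumerate(handle, start=1):
                    if old_name in line:
                        hits.append((path, number, line.rstrip()))
    return hits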

View File

@@ -22,7 +22,7 @@ from utils import get_actor_languages_list
 from utils import get_base_content_from_post
 from utils import get_content_from_post
 from utils import is_account_dir
-from utils import removeHtml
+from utils import remove_html
 from utils import get_config_param
 from utils import get_full_domain
 from utils import getMediaFormats
@@ -434,7 +434,7 @@ def _getSnippetFromBlogContent(post_json_object: {},
 content = content.split('<p>', 1)[1]
 if '</p>' in content:
 content = content.split('</p>', 1)[0]
-content = removeHtml(content)
+content = remove_html(content)
 if '\n' in content:
 content = content.split('\n')[0]
 if len(content) >= 256:

View File

@@ -25,12 +25,12 @@ from utils import contains_pgp_public_key
 from utils import acct_dir
 from utils import is_float
 from utils import get_currencies
-from utils import removeHtml
+from utils import remove_html
 from petnames import getPetName
 from session import downloadImage


-def removeHtmlTag(htmlStr: str, tag: str) -> str:
+def remove_htmlTag(htmlStr: str, tag: str) -> str:
 """Removes a given tag from a html string
 """
 tagFound = True
@@ -1253,7 +1253,7 @@ def _wordsSimilarityWordsList(content: str) -> []:
 """Returns a list of words for the given content
 """
 removePunctuation = ('.', ',', ';', '-', ':', '"')
-content = removeHtml(content).lower()
+content = remove_html(content).lower()
 for p in removePunctuation:
 content = content.replace(p, ' ')
 content = content.replace('  ', ' ')
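The body of remove_htmlTag is cut off in the hunk above. For orientation only, here is a minimal sketch that reproduces the behaviour asserted by _testRemoveHtmlTag further down in this commit (stripping a named attribute such as width="..." from markup); the name remove_html_tag_sketch is hypothetical and this is not the project's actual implementation.

import re


def remove_html_tag_sketch(html_str: str, tag: str) -> str:
    """Strips a named attribute (e.g. width="864") from an html string."""
    # drop tag="...", including any trailing whitespace left behind
    return re.sub(re.escape(tag) + r'="[^"]*"\s*', '', html_str)


# From the test later in this commit:
# remove_html_tag_sketch('<p><img width="864" height="486" '
#                        'src="https://somesiteorother.com/image.jpg"></p>',
#                        'width')
# -> '<p><img height="486" src="https://somesiteorother.com/image.jpg"></p>'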

View File

@@ -273,7 +273,7 @@ from utils import isPublicPost
 from utils import get_locked_account
 from utils import has_users_path
 from utils import get_full_domain
-from utils import removeHtml
+from utils import remove_html
 from utils import is_editor
 from utils import is_artist
 from utils import get_image_extensions
@@ -4998,7 +4998,7 @@ class PubServer(BaseHTTPRequestHandler):
 if fields.get('displayNickname'):
 if fields['displayNickname'] != actor_json['name']:
 displayName = \
-removeHtml(fields['displayNickname'])
+remove_html(fields['displayNickname'])
 if not isFiltered(base_dir,
 nickname, domain,
 displayName):
@@ -5477,7 +5477,7 @@ class PubServer(BaseHTTPRequestHandler):
 occupationName = get_occupation_name(actor_json)
 if fields.get('occupationName'):
 fields['occupationName'] = \
-removeHtml(fields['occupationName'])
+remove_html(fields['occupationName'])
 if occupationName != \
 fields['occupationName']:
 set_occupation_name(actor_json,
@@ -5524,7 +5524,7 @@ class PubServer(BaseHTTPRequestHandler):
 # change user bio
 if fields.get('bio'):
 if fields['bio'] != actor_json['summary']:
-bioStr = removeHtml(fields['bio'])
+bioStr = remove_html(fields['bio'])
 if not isFiltered(base_dir,
 nickname, domain, bioStr):
 actorTags = {}

View File

@@ -21,7 +21,7 @@ from utils import has_object_dict
 from utils import get_full_domain
 from utils import is_dm
 from utils import load_translations_from_file
-from utils import removeHtml
+from utils import remove_html
 from utils import getNicknameFromActor
 from utils import getDomainFromActor
 from utils import is_pgp_encrypted
@@ -586,7 +586,7 @@ def _textOnlyContent(content: str) -> str:
 """
 content = urllib.parse.unquote_plus(content)
 content = html.unescape(content)
-return removeHtml(content)
+return remove_html(content)


 def _getImageDescription(post_json_object: {}) -> str:
@@ -827,7 +827,7 @@ def _desktopShowActor(base_dir: str, actor_json: {}, translate: {},
 sayStr = 'Also known as ' + html.unescape(alsoKnownAsStr)
 _sayCommand(sayStr, sayStr, screenreader, system_language, espeak)
 if actor_json.get('summary'):
-sayStr = html.unescape(removeHtml(actor_json['summary']))
+sayStr = html.unescape(remove_html(actor_json['summary']))
 sayStr = sayStr.replace('"', "'")
 sayStr2 = speakableText(base_dir, sayStr, translate)[0]
 _sayCommand(sayStr, sayStr2, screenreader, system_language, espeak)

View File

@@ -21,7 +21,7 @@ from utils import domainPermitted
 from utils import is_group_account
 from utils import is_system_account
 from utils import invalid_ciphertext
-from utils import removeHtml
+from utils import remove_html
 from utils import fileLastModified
 from utils import has_object_string
 from utils import has_object_string_object
@@ -1290,7 +1290,7 @@ def _receiveReaction(recent_posts_cache: {},
 handleDom = handle.split('@')[1]
 postReactionId = message_json['object']
-emojiContent = removeHtml(message_json['content'])
+emojiContent = remove_html(message_json['content'])
 if not emojiContent:
 if debug:
 print('DEBUG: emoji reaction has no content')
@@ -1441,7 +1441,7 @@ def _receiveUndoReaction(recent_posts_cache: {},
 print('DEBUG: reaction post found in inbox. Now undoing.')
 reactionActor = message_json['actor']
 postReactionId = message_json['object']
-emojiContent = removeHtml(message_json['object']['content'])
+emojiContent = remove_html(message_json['object']['content'])
 if not emojiContent:
 if debug:
 print('DEBUG: unreaction has no content')

View File

@@ -11,7 +11,7 @@ import os
 import json
 from urllib import request, parse
 from utils import get_actor_languages_list
-from utils import removeHtml
+from utils import remove_html
 from utils import has_object_dict
 from utils import get_config_param
 from utils import local_actor_url
@@ -240,7 +240,7 @@ def libretranslate(url: str, text: str,
 links = getLinksFromContent(text)
 # LibreTranslate doesn't like markup
-text = removeHtml(text)
+text = remove_html(text)
 # remove any links from plain text version of the content
 for _, url in links.items():
@@ -302,7 +302,7 @@ def autoTranslatePost(base_dir: str, post_json_object: {},
 lang, system_language,
 libretranslateApiKey)
 if translatedText:
-if removeHtml(translatedText) == removeHtml(content):
+if remove_html(translatedText) == remove_html(content):
 return content
 translatedText = \
 '<p>' + translate['Translated'].upper() + '</p>' + \

View File

@@ -26,7 +26,7 @@ from posts import createNewsPost
 from posts import archivePostsForPerson
 from content import validHashTag
 from utils import get_base_content_from_post
-from utils import removeHtml
+from utils import remove_html
 from utils import get_full_domain
 from utils import load_json
 from utils import save_json
@@ -605,7 +605,7 @@ def _convertRSStoActivityPub(base_dir: str, http_prefix: str,
 rssDescription = ''
 # get the rss description if it exists
-rssDescription = '<p>' + removeHtml(item[4]) + '<p>'
+rssDescription = '<p>' + remove_html(item[4]) + '<p>'
 mirrored = item[7]
 postUrl = url

View File

@@ -29,7 +29,7 @@ from utils import load_json
 from utils import save_json
 from utils import is_suspended
 from utils import containsInvalidChars
-from utils import removeHtml
+from utils import remove_html
 from utils import is_account_dir
 from utils import acct_dir
 from utils import local_actor_url
@@ -208,8 +208,8 @@ def _addNewswireDictEntry(base_dir: str, domain: str,
 """Update the newswire dictionary
 """
 # remove any markup
-title = removeHtml(title)
-description = removeHtml(description)
+title = remove_html(title)
+description = remove_html(description)
 allText = title + ' ' + description
@@ -419,17 +419,17 @@ def _xml2StrToDict(base_dir: str, domain: str, xmlStr: str,
 continue
 title = rssItem.split('<title>')[1]
 title = _removeCDATA(title.split('</title>')[0])
-title = removeHtml(title)
+title = remove_html(title)
 description = ''
 if '<description>' in rssItem and '</description>' in rssItem:
 description = rssItem.split('<description>')[1]
-description = removeHtml(description.split('</description>')[0])
+description = remove_html(description.split('</description>')[0])
 else:
 if '<media:description>' in rssItem and \
 '</media:description>' in rssItem:
 description = rssItem.split('<media:description>')[1]
 description = description.split('</media:description>')[0]
-description = removeHtml(description)
+description = remove_html(description)
 link = rssItem.split('<link>')[1]
 link = link.split('</link>')[0]
 if '://' not in link:
@@ -507,17 +507,17 @@ def _xml1StrToDict(base_dir: str, domain: str, xmlStr: str,
 continue
 title = rssItem.split('<title>')[1]
 title = _removeCDATA(title.split('</title>')[0])
-title = removeHtml(title)
+title = remove_html(title)
 description = ''
 if '<description>' in rssItem and '</description>' in rssItem:
 description = rssItem.split('<description>')[1]
-description = removeHtml(description.split('</description>')[0])
+description = remove_html(description.split('</description>')[0])
 else:
 if '<media:description>' in rssItem and \
 '</media:description>' in rssItem:
 description = rssItem.split('<media:description>')[1]
 description = description.split('</media:description>')[0]
-description = removeHtml(description)
+description = remove_html(description)
 link = rssItem.split('<link>')[1]
 link = link.split('</link>')[0]
 if '://' not in link:
@@ -583,17 +583,17 @@ def _atomFeedToDict(base_dir: str, domain: str, xmlStr: str,
 continue
 title = atomItem.split('<title>')[1]
 title = _removeCDATA(title.split('</title>')[0])
-title = removeHtml(title)
+title = remove_html(title)
 description = ''
 if '<summary>' in atomItem and '</summary>' in atomItem:
 description = atomItem.split('<summary>')[1]
-description = removeHtml(description.split('</summary>')[0])
+description = remove_html(description.split('</summary>')[0])
 else:
 if '<media:description>' in atomItem and \
 '</media:description>' in atomItem:
 description = atomItem.split('<media:description>')[1]
 description = description.split('</media:description>')[0]
-description = removeHtml(description)
+description = remove_html(description)
 link = atomItem.split('<link>')[1]
 link = link.split('</link>')[0]
 if '://' not in link:
@@ -670,11 +670,11 @@ def _jsonFeedV1ToDict(base_dir: str, domain: str, xmlStr: str,
 if jsonFeedItem.get('content_html'):
 if not isinstance(jsonFeedItem['content_html'], str):
 continue
-title = removeHtml(jsonFeedItem['content_html'])
+title = remove_html(jsonFeedItem['content_html'])
 else:
 if not isinstance(jsonFeedItem['content_text'], str):
 continue
-title = removeHtml(jsonFeedItem['content_text'])
+title = remove_html(jsonFeedItem['content_text'])
 if len(title) > maxBytes:
 print('WARN: json feed title is too long')
 continue
@@ -682,7 +682,7 @@ def _jsonFeedV1ToDict(base_dir: str, domain: str, xmlStr: str,
 if jsonFeedItem.get('description'):
 if not isinstance(jsonFeedItem['description'], str):
 continue
-description = removeHtml(jsonFeedItem['description'])
+description = remove_html(jsonFeedItem['description'])
 if len(description) > maxBytes:
 print('WARN: json feed description is too long')
 continue
@@ -780,11 +780,11 @@ def _atomFeedYTToDict(base_dir: str, domain: str, xmlStr: str,
 '</media:description>' in atomItem:
 description = atomItem.split('<media:description>')[1]
 description = description.split('</media:description>')[0]
-description = removeHtml(description)
+description = remove_html(description)
 elif '<summary>' in atomItem and '</summary>' in atomItem:
 description = atomItem.split('<summary>')[1]
 description = description.split('</summary>')[0]
-description = removeHtml(description)
+description = remove_html(description)
 link = atomItem.split('<yt:videoId>')[1]
 link = link.split('</yt:videoId>')[0]
 link = 'https://www.youtube.com/watch?v=' + link.strip()
@@ -946,7 +946,7 @@ def getRSSfromDict(base_dir: str, newswire: {},
 rssStr += \
 '<item>\n' + \
 ' <title>' + fields[0] + '</title>\n'
-description = removeHtml(firstParagraphFromString(fields[4]))
+description = remove_html(firstParagraphFromString(fields[4]))
 rssStr += ' <description>' + description + '</description>\n'
 url = fields[1]
 if '://' not in url:
@@ -1065,7 +1065,7 @@ def _addAccountBlogsToNewswire(base_dir: str, nickname: str, domain: str,
 get_base_content_from_post(post_json_object,
 system_language)
 description = firstParagraphFromString(content)
-description = removeHtml(description)
+description = remove_html(description)
 tagsFromPost = _getHashtagsFromPost(post_json_object)
 summary = post_json_object['object']['summary']
 _addNewswireDictEntry(base_dir, domain,

View File

@@ -38,7 +38,7 @@ from roles import setRole
 from roles import setRolesFromList
 from roles import getActorRolesList
 from media import processMetaData
-from utils import removeHtml
+from utils import remove_html
 from utils import containsInvalidChars
 from utils import replace_users_with_at
 from utils import remove_line_endings
@@ -1696,7 +1696,7 @@ def validSendingActor(session, base_dir: str,
 if not unit_test:
 bioStr = ''
 if actor_json.get('summary'):
-bioStr = removeHtml(actor_json['summary']).strip()
+bioStr = remove_html(actor_json['summary']).strip()
 if not bioStr:
 # allow no bio if it's an actor in this instance
 if domain not in sendingActor:
@@ -1707,7 +1707,7 @@ def validSendingActor(session, base_dir: str,
 print('REJECT: actor bio is not long enough ' +
 sendingActor + ' ' + bioStr)
 return False
-bioStr += ' ' + removeHtml(actor_json['preferredUsername'])
+bioStr += ' ' + remove_html(actor_json['preferredUsername'])
 if actor_json.get('attachment'):
 if isinstance(actor_json['attachment'], list):
@@ -1724,7 +1724,7 @@ def validSendingActor(session, base_dir: str,
 bioStr += ' ' + tag['value']
 if actor_json.get('name'):
-bioStr += ' ' + removeHtml(actor_json['name'])
+bioStr += ' ' + remove_html(actor_json['name'])
 if containsInvalidChars(bioStr):
 print('REJECT: post actor bio contains invalid characters')
 return False

View File

@@ -65,7 +65,7 @@ from utils import get_config_param
 from utils import locateNewsVotes
 from utils import locateNewsArrival
 from utils import votesOnNewswireItem
-from utils import removeHtml
+from utils import remove_html
 from utils import dangerousMarkup
 from utils import acct_dir
 from utils import local_actor_url
@@ -714,7 +714,7 @@ def _updateWordFrequency(content: str, wordFrequency: {}) -> None:
 """Creates a dictionary containing words and the number of times
 that they appear
 """
-plainText = removeHtml(content)
+plainText = remove_html(content)
 removeChars = ('.', ';', '?', '\n', ':')
 for ch in removeChars:
 plainText = plainText.replace(ch, ' ')
@@ -997,7 +997,7 @@ def _addSchedulePost(base_dir: str, nickname: str, domain: str,
 def validContentWarning(cw: str) -> str:
 """Returns a validated content warning
 """
-cw = removeHtml(cw)
+cw = remove_html(cw)
 # hashtags within content warnings apparently cause a lot of trouble
 # so remove them
 if '#' in cw:

View File

@@ -19,7 +19,7 @@ from utils import getDomainFromActor
 from utils import getNicknameFromActor
 from utils import getGenderFromBio
 from utils import getDisplayName
-from utils import removeHtml
+from utils import remove_html
 from utils import load_json
 from utils import save_json
 from utils import is_pgp_encrypted
@@ -388,7 +388,7 @@ def speakableText(base_dir: str, content: str, translate: {}) -> (str, []):
 # replace some emoji before removing html
 if ' <3' in content:
 content = content.replace(' <3', ' ' + translate['heart'])
-content = removeHtml(htmlReplaceQuoteMarks(content))
+content = remove_html(htmlReplaceQuoteMarks(content))
 detectedLinks = []
 content = speakerReplaceLinks(content, translate, detectedLinks)
 # replace all double spaces
@@ -426,7 +426,7 @@ def _postToSpeakerJson(base_dir: str, http_prefix: str,
 # replace some emoji before removing html
 if ' <3' in content:
 content = content.replace(' <3', ' ' + translate['heart'])
-content = removeHtml(htmlReplaceQuoteMarks(content))
+content = remove_html(htmlReplaceQuoteMarks(content))
 content = speakerReplaceLinks(content, translate, detectedLinks)
 # replace all double spaces
 while '  ' in content:

View File

@@ -82,7 +82,7 @@ from utils import load_json
 from utils import save_json
 from utils import getStatusNumber
 from utils import get_followers_of_person
-from utils import removeHtml
+from utils import remove_html
 from utils import dangerousMarkup
 from utils import acct_dir
 from pgp import extractPGPPublicKey
@@ -141,7 +141,7 @@ from content import addHtmlTags
 from content import removeLongWords
 from content import replaceContentDuplicates
 from content import removeTextFormatting
-from content import removeHtmlTag
+from content import remove_htmlTag
 from theme import updateDefaultThemesList
 from theme import setCSSparam
 from theme import scanThemesForScripts
@@ -3655,17 +3655,17 @@ def _testSiteIsActive():
 def _testRemoveHtml():
 print('testRemoveHtml')
 testStr = 'This string has no html.'
-assert(removeHtml(testStr) == testStr)
+assert(remove_html(testStr) == testStr)
 testStr = 'This string <a href="1234.567">has html</a>.'
-assert(removeHtml(testStr) == 'This string has html.')
+assert(remove_html(testStr) == 'This string has html.')
 testStr = '<label>This string has.</label><label>Two labels.</label>'
-assert(removeHtml(testStr) == 'This string has. Two labels.')
+assert(remove_html(testStr) == 'This string has. Two labels.')
 testStr = '<p>This string has.</p><p>Two paragraphs.</p>'
-assert(removeHtml(testStr) == 'This string has.\n\nTwo paragraphs.')
+assert(remove_html(testStr) == 'This string has.\n\nTwo paragraphs.')
 testStr = 'This string has.<br>A new line.'
-assert(removeHtml(testStr) == 'This string has.\nA new line.')
+assert(remove_html(testStr) == 'This string has.\nA new line.')
 testStr = '<p>This string contains a url http://somesite.or.other</p>'
-assert(removeHtml(testStr) ==
+assert(remove_html(testStr) ==
 'This string contains a url http://somesite.or.other')
@@ -3998,7 +3998,7 @@ def _testRemoveHtmlTag():
 print('testRemoveHtmlTag')
 testStr = "<p><img width=\"864\" height=\"486\" " + \
 "src=\"https://somesiteorother.com/image.jpg\"></p>"
-resultStr = removeHtmlTag(testStr, 'width')
+resultStr = remove_htmlTag(testStr, 'width')
 assert resultStr == "<p><img height=\"486\" " + \
 "src=\"https://somesiteorother.com/image.jpg\"></p>"
@@ -5622,7 +5622,7 @@ def _testGetLinksFromContent():
 assert links.get('another')
 assert links['another'] == link2
-contentPlain = '<p>' + removeHtml(content) + '</p>'
+contentPlain = '<p>' + remove_html(content) + '</p>'
 assert '>@linked</a>' not in contentPlain
 content = addLinksToContent(contentPlain, links)
 assert '>@linked</a>' in content

View File

@@ -440,7 +440,7 @@ def getMediaFormats() -> str:
 return mediaFormats


-def removeHtml(content: str) -> str:
+def remove_html(content: str) -> str:
 """Removes html links from the given content.
 Used to ensure that profile descriptions don't contain dubious content
 """
@@ -479,11 +479,11 @@ def firstParagraphFromString(content: str) -> str:
 to be used as a summary in the newswire feed
 """
 if '<p>' not in content or '</p>' not in content:
-return removeHtml(content)
+return remove_html(content)
 paragraph = content.split('<p>')[1]
 if '</p>' in paragraph:
 paragraph = paragraph.split('</p>')[0]
-return removeHtml(paragraph)
+return remove_html(paragraph)


 def is_system_account(nickname: str) -> bool:
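The body of remove_html itself is not shown in this hunk. For orientation only, the following is a minimal sketch consistent with the assertions in _testRemoveHtml earlier in this commit (tags stripped, paragraphs separated by blank lines, <br> becoming a newline, adjacent labels joined with a space); the name remove_html_sketch is hypothetical and this is not the project's actual implementation.

import re


def remove_html_sketch(content: str) -> str:
    """Strips markup in the way the assertions in _testRemoveHtml expect."""
    # paragraph boundaries become blank lines
    content = content.replace('</p><p>', '\n\n')
    # line breaks become newlines
    content = content.replace('<br>', '\n')
    # adjacent labels are joined with a single space
    content = content.replace('</label><label>', ' ')
    # any remaining tags are dropped entirely
    return re.sub(r'<[^>]+>', '', content)

The real function in utils.py will differ in detail, but the callers touched throughout this commit only rely on it returning plain text.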

View File

@@ -13,7 +13,7 @@ from content import removeLongWords
 from content import limitRepeatedWords
 from utils import get_fav_filename_from_url
 from utils import get_base_content_from_post
-from utils import removeHtml
+from utils import remove_html
 from utils import locate_post
 from utils import load_json
 from utils import votesOnNewswireItem
@@ -221,7 +221,7 @@ def _htmlNewswire(base_dir: str, newswire: {}, nickname: str, moderator: bool,
 separatorStr = htmlPostSeparator(base_dir, 'right')
 htmlStr = ''
 for dateStr, item in newswire.items():
-item[0] = removeHtml(item[0]).strip()
+item[0] = remove_html(item[0]).strip()
 if not item[0]:
 continue
 # remove any CDATA
@@ -408,7 +408,7 @@ def htmlCitations(base_dir: str, nickname: str, domain: str,
 if newswire:
 ctr = 0
 for dateStr, item in newswire.items():
-item[0] = removeHtml(item[0]).strip()
+item[0] = remove_html(item[0]).strip()
 if not item[0]:
 continue
 # remove any CDATA

View File

@@ -15,7 +15,7 @@ from posts import isModerator
 from utils import get_full_domain
 from utils import get_config_param
 from utils import is_dormant
-from utils import removeHtml
+from utils import remove_html
 from utils import getDomainFromActor
 from utils import getNicknameFromActor
 from utils import is_featured_writer
@@ -191,51 +191,51 @@ def htmlPersonOptions(defaultTimeline: str,
 optionsStr += \
 '<p class="imText">' + translate['Email'] + \
 ': <a href="mailto:' + \
-emailAddress + '">' + removeHtml(emailAddress) + '</a></p>\n'
+emailAddress + '">' + remove_html(emailAddress) + '</a></p>\n'
 if xmppAddress:
 optionsStr += \
 '<p class="imText">' + translate['XMPP'] + \
-': <a href="xmpp:' + removeHtml(xmppAddress) + '">' + \
+': <a href="xmpp:' + remove_html(xmppAddress) + '">' + \
 xmppAddress + '</a></p>\n'
 if matrixAddress:
 optionsStr += \
 '<p class="imText">' + translate['Matrix'] + ': ' + \
-removeHtml(matrixAddress) + '</p>\n'
+remove_html(matrixAddress) + '</p>\n'
 if ssbAddress:
 optionsStr += \
-'<p class="imText">SSB: ' + removeHtml(ssbAddress) + '</p>\n'
+'<p class="imText">SSB: ' + remove_html(ssbAddress) + '</p>\n'
 if blogAddress:
 optionsStr += \
 '<p class="imText">Blog: <a href="' + \
-removeHtml(blogAddress) + '">' + \
-removeHtml(blogAddress) + '</a></p>\n'
+remove_html(blogAddress) + '">' + \
+remove_html(blogAddress) + '</a></p>\n'
 if toxAddress:
 optionsStr += \
-'<p class="imText">Tox: ' + removeHtml(toxAddress) + '</p>\n'
+'<p class="imText">Tox: ' + remove_html(toxAddress) + '</p>\n'
 if briarAddress:
 if briarAddress.startswith('briar://'):
 optionsStr += \
 '<p class="imText">' + \
-removeHtml(briarAddress) + '</p>\n'
+remove_html(briarAddress) + '</p>\n'
 else:
 optionsStr += \
 '<p class="imText">briar://' + \
-removeHtml(briarAddress) + '</p>\n'
+remove_html(briarAddress) + '</p>\n'
 if jamiAddress:
 optionsStr += \
-'<p class="imText">Jami: ' + removeHtml(jamiAddress) + '</p>\n'
+'<p class="imText">Jami: ' + remove_html(jamiAddress) + '</p>\n'
 if cwtchAddress:
 optionsStr += \
-'<p class="imText">Cwtch: ' + removeHtml(cwtchAddress) + '</p>\n'
+'<p class="imText">Cwtch: ' + remove_html(cwtchAddress) + '</p>\n'
 if EnigmaPubKey:
 optionsStr += \
-'<p class="imText">Enigma: ' + removeHtml(EnigmaPubKey) + '</p>\n'
+'<p class="imText">Enigma: ' + remove_html(EnigmaPubKey) + '</p>\n'
 if PGPfingerprint:
 optionsStr += '<p class="pgp">PGP: ' + \
-removeHtml(PGPfingerprint).replace('\n', '<br>') + '</p>\n'
+remove_html(PGPfingerprint).replace('\n', '<br>') + '</p>\n'
 if PGPpubKey:
 optionsStr += '<p class="pgp">' + \
-removeHtml(PGPpubKey).replace('\n', '<br>') + '</p>\n'
+remove_html(PGPpubKey).replace('\n', '<br>') + '</p>\n'
 optionsStr += ' <form method="POST" action="' + \
 originPathStr + '/personoptions">\n'
 optionsStr += ' <input type="hidden" name="pageNumber" value="' + \

View File

@@ -24,7 +24,7 @@ from posts import getPersonBox
 from posts import downloadAnnounce
 from posts import populateRepliesJson
 from utils import removeHashFromPostId
-from utils import removeHtml
+from utils import remove_html
 from utils import get_actor_languages_list
 from utils import get_base_content_from_post
 from utils import get_content_from_post
@@ -114,7 +114,7 @@ def _htmlPostMetadataOpenGraph(domain: str, post_json_object: {}) -> str:
 "\" property=\"og:published_time\" />\n"
 if not objJson.get('attachment') or objJson.get('sensitive'):
 if objJson.get('content') and not objJson.get('sensitive'):
-description = removeHtml(objJson['content'])
+description = remove_html(objJson['content'])
 metadata += \
 " <meta content=\"" + description + \
 "\" name=\"description\">\n"
@@ -142,7 +142,7 @@ def _htmlPostMetadataOpenGraph(domain: str, post_json_object: {}) -> str:
 description = 'Attached: 1 audio'
 if description:
 if objJson.get('content') and not objJson.get('sensitive'):
-description += '\n\n' + removeHtml(objJson['content'])
+description += '\n\n' + remove_html(objJson['content'])
 metadata += \
 " <meta content=\"" + description + \
 "\" name=\"description\">\n"

View File

@@ -21,7 +21,7 @@ from utils import is_dormant
 from utils import getNicknameFromActor
 from utils import getDomainFromActor
 from utils import is_system_account
-from utils import removeHtml
+from utils import remove_html
 from utils import load_json
 from utils import get_config_param
 from utils import get_image_formats
@@ -242,7 +242,7 @@ def htmlProfileAfterSearch(cssCache: {},
 avatarDescription = avatarDescription.replace('<p>', '')
 avatarDescription = avatarDescription.replace('</p>', '')
 if '<' in avatarDescription:
-avatarDescription = removeHtml(avatarDescription)
+avatarDescription = remove_html(avatarDescription)
 imageUrl = ''
 if profile_json.get('image'):

View File

@@ -12,7 +12,7 @@ from shutil import copyfile
 from collections import OrderedDict
 from session import getJson
 from utils import is_account_dir
-from utils import removeHtml
+from utils import remove_html
 from utils import getProtocolPrefixes
 from utils import load_json
 from utils import get_cached_post_filename
@@ -230,7 +230,7 @@ def _setActorPropertyUrl(actor_json: {}, property_name: str, url: str) -> None:
 def setBlogAddress(actor_json: {}, blogAddress: str) -> None:
 """Sets an blog address for the given actor
 """
-_setActorPropertyUrl(actor_json, 'Blog', removeHtml(blogAddress))
+_setActorPropertyUrl(actor_json, 'Blog', remove_html(blogAddress))


 def updateAvatarImageCache(signing_priv_key_pem: str,
@@ -650,8 +650,8 @@ def htmlHeaderWithPersonMarkup(cssFilename: str, instanceTitle: str,
 firstEntry = False
 skillsMarkup += '\n ],\n'
-description = removeHtml(actor_json['summary'])
-nameStr = removeHtml(actor_json['name'])
+description = remove_html(actor_json['summary'])
+nameStr = remove_html(actor_json['name'])
 domain_full = actor_json['id'].split('://')[1].split('/')[0]
 handle = actor_json['preferredUsername'] + '@' + domain_full
@@ -689,7 +689,7 @@ def htmlHeaderWithPersonMarkup(cssFilename: str, instanceTitle: str,
 ' }\n' + \
 ' </script>\n'
-description = removeHtml(description)
+description = remove_html(description)
 ogMetadata = \
 " <meta content=\"profile\" property=\"og:type\" />\n" + \
 " <meta content=\"" + description + \

View File

@@ -10,7 +10,7 @@ __module_group__ = "Onboarding"
 import os
 from shutil import copyfile
 from utils import get_config_param
-from utils import removeHtml
+from utils import remove_html
 from utils import acct_dir
 from webapp_utils import htmlHeaderWithExternalStyle
 from webapp_utils import htmlFooter
@@ -79,7 +79,7 @@ def htmlWelcomeScreen(base_dir: str, nickname: str,
 with open(welcomeFilename, 'r') as welcomeFile:
 welcomeText = welcomeFile.read()
 welcomeText = welcomeText.replace('INSTANCE', instanceTitle)
-welcomeText = markdownToHtml(removeHtml(welcomeText))
+welcomeText = markdownToHtml(remove_html(welcomeText))
 welcomeForm = ''
 cssFilename = base_dir + '/epicyon-welcome.css'

View File

@@ -9,7 +9,7 @@ __module_group__ = "Onboarding"
 import os
 from shutil import copyfile
-from utils import removeHtml
+from utils import remove_html
 from utils import get_config_param
 from webapp_utils import htmlHeaderWithExternalStyle
 from webapp_utils import htmlFooter
@@ -54,7 +54,7 @@ def htmlWelcomeFinal(base_dir: str, nickname: str, domain: str,
 with open(finalFilename, 'r') as finalFile:
 finalText = finalFile.read()
 finalText = finalText.replace('INSTANCE', instanceTitle)
-finalText = markdownToHtml(removeHtml(finalText))
+finalText = markdownToHtml(remove_html(finalText))
 finalForm = ''
 cssFilename = base_dir + '/epicyon-welcome.css'

View File

@@ -9,7 +9,7 @@ __module_group__ = "Onboarding"
 import os
 from shutil import copyfile
-from utils import removeHtml
+from utils import remove_html
 from utils import load_json
 from utils import get_config_param
 from utils import get_image_extensions
@@ -60,7 +60,7 @@ def htmlWelcomeProfile(base_dir: str, nickname: str, domain: str,
 with open(profileFilename, 'r') as profileFile:
 profileText = profileFile.read()
 profileText = profileText.replace('INSTANCE', instanceTitle)
-profileText = markdownToHtml(removeHtml(profileText))
+profileText = markdownToHtml(remove_html(profileText))
 profileForm = ''
 cssFilename = base_dir + '/epicyon-welcome.css'