mirror of https://gitlab.com/bashrc2/epicyon
Snake case
parent e2b7407e96
commit 501feef616
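This commit renames the HTML-stripping helpers to snake_case and updates their call sites across the codebase: removeHtml() in utils.py becomes remove_html(), and removeHtmlTag() in content.py becomes remove_htmlTag(). A minimal sketch of the before/after call pattern, assuming the helpers' behaviour is otherwise unchanged (the example strings are taken from the tests.py assertions in this diff):

    # before this commit (camelCase names)
    from utils import removeHtml
    from content import removeHtmlTag

    plain = removeHtml('This string <a href="1234.567">has html</a>.')
    stripped = removeHtmlTag('<p><img width="864" src="image.jpg"></p>', 'width')

    # after this commit (snake_case names)
    from utils import remove_html
    from content import remove_htmlTag

    plain = remove_html('This string <a href="1234.567">has html</a>.')
    stripped = remove_htmlTag('<p><img width="864" src="image.jpg"></p>', 'width')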
blog.py (4 changed lines)

@@ -22,7 +22,7 @@ from utils import get_actor_languages_list
 from utils import get_base_content_from_post
 from utils import get_content_from_post
 from utils import is_account_dir
-from utils import removeHtml
+from utils import remove_html
 from utils import get_config_param
 from utils import get_full_domain
 from utils import getMediaFormats

@@ -434,7 +434,7 @@ def _getSnippetFromBlogContent(post_json_object: {},
 content = content.split('<p>', 1)[1]
 if '</p>' in content:
 content = content.split('</p>', 1)[0]
-content = removeHtml(content)
+content = remove_html(content)
 if '\n' in content:
 content = content.split('\n')[0]
 if len(content) >= 256:
@@ -25,12 +25,12 @@ from utils import contains_pgp_public_key
 from utils import acct_dir
 from utils import is_float
 from utils import get_currencies
-from utils import removeHtml
+from utils import remove_html
 from petnames import getPetName
 from session import downloadImage


-def removeHtmlTag(htmlStr: str, tag: str) -> str:
+def remove_htmlTag(htmlStr: str, tag: str) -> str:
 """Removes a given tag from a html string
 """
 tagFound = True

@@ -1253,7 +1253,7 @@ def _wordsSimilarityWordsList(content: str) -> []:
 """Returns a list of words for the given content
 """
 removePunctuation = ('.', ',', ';', '-', ':', '"')
-content = removeHtml(content).lower()
+content = remove_html(content).lower()
 for p in removePunctuation:
 content = content.replace(p, ' ')
 content = content.replace('  ', ' ')
@@ -273,7 +273,7 @@ from utils import isPublicPost
 from utils import get_locked_account
 from utils import has_users_path
 from utils import get_full_domain
-from utils import removeHtml
+from utils import remove_html
 from utils import is_editor
 from utils import is_artist
 from utils import get_image_extensions

@@ -4998,7 +4998,7 @@ class PubServer(BaseHTTPRequestHandler):
 if fields.get('displayNickname'):
 if fields['displayNickname'] != actor_json['name']:
 displayName = \
-removeHtml(fields['displayNickname'])
+remove_html(fields['displayNickname'])
 if not isFiltered(base_dir,
 nickname, domain,
 displayName):

@@ -5477,7 +5477,7 @@ class PubServer(BaseHTTPRequestHandler):
 occupationName = get_occupation_name(actor_json)
 if fields.get('occupationName'):
 fields['occupationName'] = \
-removeHtml(fields['occupationName'])
+remove_html(fields['occupationName'])
 if occupationName != \
 fields['occupationName']:
 set_occupation_name(actor_json,

@@ -5524,7 +5524,7 @@ class PubServer(BaseHTTPRequestHandler):
 # change user bio
 if fields.get('bio'):
 if fields['bio'] != actor_json['summary']:
-bioStr = removeHtml(fields['bio'])
+bioStr = remove_html(fields['bio'])
 if not isFiltered(base_dir,
 nickname, domain, bioStr):
 actorTags = {}
@@ -21,7 +21,7 @@ from utils import has_object_dict
 from utils import get_full_domain
 from utils import is_dm
 from utils import load_translations_from_file
-from utils import removeHtml
+from utils import remove_html
 from utils import getNicknameFromActor
 from utils import getDomainFromActor
 from utils import is_pgp_encrypted

@@ -586,7 +586,7 @@ def _textOnlyContent(content: str) -> str:
 """
 content = urllib.parse.unquote_plus(content)
 content = html.unescape(content)
-return removeHtml(content)
+return remove_html(content)


 def _getImageDescription(post_json_object: {}) -> str:

@@ -827,7 +827,7 @@ def _desktopShowActor(base_dir: str, actor_json: {}, translate: {},
 sayStr = 'Also known as ' + html.unescape(alsoKnownAsStr)
 _sayCommand(sayStr, sayStr, screenreader, system_language, espeak)
 if actor_json.get('summary'):
-sayStr = html.unescape(removeHtml(actor_json['summary']))
+sayStr = html.unescape(remove_html(actor_json['summary']))
 sayStr = sayStr.replace('"', "'")
 sayStr2 = speakableText(base_dir, sayStr, translate)[0]
 _sayCommand(sayStr, sayStr2, screenreader, system_language, espeak)
inbox.py (6 changed lines)

@@ -21,7 +21,7 @@ from utils import domainPermitted
 from utils import is_group_account
 from utils import is_system_account
 from utils import invalid_ciphertext
-from utils import removeHtml
+from utils import remove_html
 from utils import fileLastModified
 from utils import has_object_string
 from utils import has_object_string_object

@@ -1290,7 +1290,7 @@ def _receiveReaction(recent_posts_cache: {},
 handleDom = handle.split('@')[1]

 postReactionId = message_json['object']
-emojiContent = removeHtml(message_json['content'])
+emojiContent = remove_html(message_json['content'])
 if not emojiContent:
 if debug:
 print('DEBUG: emoji reaction has no content')

@@ -1441,7 +1441,7 @@ def _receiveUndoReaction(recent_posts_cache: {},
 print('DEBUG: reaction post found in inbox. Now undoing.')
 reactionActor = message_json['actor']
 postReactionId = message_json['object']
-emojiContent = removeHtml(message_json['object']['content'])
+emojiContent = remove_html(message_json['object']['content'])
 if not emojiContent:
 if debug:
 print('DEBUG: unreaction has no content')
@@ -11,7 +11,7 @@ import os
 import json
 from urllib import request, parse
 from utils import get_actor_languages_list
-from utils import removeHtml
+from utils import remove_html
 from utils import has_object_dict
 from utils import get_config_param
 from utils import local_actor_url

@@ -240,7 +240,7 @@ def libretranslate(url: str, text: str,
 links = getLinksFromContent(text)

 # LibreTranslate doesn't like markup
-text = removeHtml(text)
+text = remove_html(text)

 # remove any links from plain text version of the content
 for _, url in links.items():

@@ -302,7 +302,7 @@ def autoTranslatePost(base_dir: str, post_json_object: {},
 lang, system_language,
 libretranslateApiKey)
 if translatedText:
-if removeHtml(translatedText) == removeHtml(content):
+if remove_html(translatedText) == remove_html(content):
 return content
 translatedText = \
 '<p>' + translate['Translated'].upper() + '</p>' + \
@@ -26,7 +26,7 @@ from posts import createNewsPost
 from posts import archivePostsForPerson
 from content import validHashTag
 from utils import get_base_content_from_post
-from utils import removeHtml
+from utils import remove_html
 from utils import get_full_domain
 from utils import load_json
 from utils import save_json

@@ -605,7 +605,7 @@ def _convertRSStoActivityPub(base_dir: str, http_prefix: str,
 rssDescription = ''

 # get the rss description if it exists
-rssDescription = '<p>' + removeHtml(item[4]) + '<p>'
+rssDescription = '<p>' + remove_html(item[4]) + '<p>'

 mirrored = item[7]
 postUrl = url
newswire.py (38 changed lines)

@@ -29,7 +29,7 @@ from utils import load_json
 from utils import save_json
 from utils import is_suspended
 from utils import containsInvalidChars
-from utils import removeHtml
+from utils import remove_html
 from utils import is_account_dir
 from utils import acct_dir
 from utils import local_actor_url

@@ -208,8 +208,8 @@ def _addNewswireDictEntry(base_dir: str, domain: str,
 """Update the newswire dictionary
 """
 # remove any markup
-title = removeHtml(title)
-description = removeHtml(description)
+title = remove_html(title)
+description = remove_html(description)

 allText = title + ' ' + description

@@ -419,17 +419,17 @@ def _xml2StrToDict(base_dir: str, domain: str, xmlStr: str,
 continue
 title = rssItem.split('<title>')[1]
 title = _removeCDATA(title.split('</title>')[0])
-title = removeHtml(title)
+title = remove_html(title)
 description = ''
 if '<description>' in rssItem and '</description>' in rssItem:
 description = rssItem.split('<description>')[1]
-description = removeHtml(description.split('</description>')[0])
+description = remove_html(description.split('</description>')[0])
 else:
 if '<media:description>' in rssItem and \
 '</media:description>' in rssItem:
 description = rssItem.split('<media:description>')[1]
 description = description.split('</media:description>')[0]
-description = removeHtml(description)
+description = remove_html(description)
 link = rssItem.split('<link>')[1]
 link = link.split('</link>')[0]
 if '://' not in link:

@@ -507,17 +507,17 @@ def _xml1StrToDict(base_dir: str, domain: str, xmlStr: str,
 continue
 title = rssItem.split('<title>')[1]
 title = _removeCDATA(title.split('</title>')[0])
-title = removeHtml(title)
+title = remove_html(title)
 description = ''
 if '<description>' in rssItem and '</description>' in rssItem:
 description = rssItem.split('<description>')[1]
-description = removeHtml(description.split('</description>')[0])
+description = remove_html(description.split('</description>')[0])
 else:
 if '<media:description>' in rssItem and \
 '</media:description>' in rssItem:
 description = rssItem.split('<media:description>')[1]
 description = description.split('</media:description>')[0]
-description = removeHtml(description)
+description = remove_html(description)
 link = rssItem.split('<link>')[1]
 link = link.split('</link>')[0]
 if '://' not in link:

@@ -583,17 +583,17 @@ def _atomFeedToDict(base_dir: str, domain: str, xmlStr: str,
 continue
 title = atomItem.split('<title>')[1]
 title = _removeCDATA(title.split('</title>')[0])
-title = removeHtml(title)
+title = remove_html(title)
 description = ''
 if '<summary>' in atomItem and '</summary>' in atomItem:
 description = atomItem.split('<summary>')[1]
-description = removeHtml(description.split('</summary>')[0])
+description = remove_html(description.split('</summary>')[0])
 else:
 if '<media:description>' in atomItem and \
 '</media:description>' in atomItem:
 description = atomItem.split('<media:description>')[1]
 description = description.split('</media:description>')[0]
-description = removeHtml(description)
+description = remove_html(description)
 link = atomItem.split('<link>')[1]
 link = link.split('</link>')[0]
 if '://' not in link:

@@ -670,11 +670,11 @@ def _jsonFeedV1ToDict(base_dir: str, domain: str, xmlStr: str,
 if jsonFeedItem.get('content_html'):
 if not isinstance(jsonFeedItem['content_html'], str):
 continue
-title = removeHtml(jsonFeedItem['content_html'])
+title = remove_html(jsonFeedItem['content_html'])
 else:
 if not isinstance(jsonFeedItem['content_text'], str):
 continue
-title = removeHtml(jsonFeedItem['content_text'])
+title = remove_html(jsonFeedItem['content_text'])
 if len(title) > maxBytes:
 print('WARN: json feed title is too long')
 continue

@@ -682,7 +682,7 @@ def _jsonFeedV1ToDict(base_dir: str, domain: str, xmlStr: str,
 if jsonFeedItem.get('description'):
 if not isinstance(jsonFeedItem['description'], str):
 continue
-description = removeHtml(jsonFeedItem['description'])
+description = remove_html(jsonFeedItem['description'])
 if len(description) > maxBytes:
 print('WARN: json feed description is too long')
 continue

@@ -780,11 +780,11 @@ def _atomFeedYTToDict(base_dir: str, domain: str, xmlStr: str,
 '</media:description>' in atomItem:
 description = atomItem.split('<media:description>')[1]
 description = description.split('</media:description>')[0]
-description = removeHtml(description)
+description = remove_html(description)
 elif '<summary>' in atomItem and '</summary>' in atomItem:
 description = atomItem.split('<summary>')[1]
 description = description.split('</summary>')[0]
-description = removeHtml(description)
+description = remove_html(description)
 link = atomItem.split('<yt:videoId>')[1]
 link = link.split('</yt:videoId>')[0]
 link = 'https://www.youtube.com/watch?v=' + link.strip()

@@ -946,7 +946,7 @@ def getRSSfromDict(base_dir: str, newswire: {},
 rssStr += \
 '<item>\n' + \
 ' <title>' + fields[0] + '</title>\n'
-description = removeHtml(firstParagraphFromString(fields[4]))
+description = remove_html(firstParagraphFromString(fields[4]))
 rssStr += ' <description>' + description + '</description>\n'
 url = fields[1]
 if '://' not in url:

@@ -1065,7 +1065,7 @@ def _addAccountBlogsToNewswire(base_dir: str, nickname: str, domain: str,
 get_base_content_from_post(post_json_object,
 system_language)
 description = firstParagraphFromString(content)
-description = removeHtml(description)
+description = remove_html(description)
 tagsFromPost = _getHashtagsFromPost(post_json_object)
 summary = post_json_object['object']['summary']
 _addNewswireDictEntry(base_dir, domain,
@@ -38,7 +38,7 @@ from roles import setRole
 from roles import setRolesFromList
 from roles import getActorRolesList
 from media import processMetaData
-from utils import removeHtml
+from utils import remove_html
 from utils import containsInvalidChars
 from utils import replace_users_with_at
 from utils import remove_line_endings

@@ -1696,7 +1696,7 @@ def validSendingActor(session, base_dir: str,
 if not unit_test:
 bioStr = ''
 if actor_json.get('summary'):
-bioStr = removeHtml(actor_json['summary']).strip()
+bioStr = remove_html(actor_json['summary']).strip()
 if not bioStr:
 # allow no bio if it's an actor in this instance
 if domain not in sendingActor:

@@ -1707,7 +1707,7 @@ def validSendingActor(session, base_dir: str,
 print('REJECT: actor bio is not long enough ' +
 sendingActor + ' ' + bioStr)
 return False
-bioStr += ' ' + removeHtml(actor_json['preferredUsername'])
+bioStr += ' ' + remove_html(actor_json['preferredUsername'])

 if actor_json.get('attachment'):
 if isinstance(actor_json['attachment'], list):

@@ -1724,7 +1724,7 @@ def validSendingActor(session, base_dir: str,
 bioStr += ' ' + tag['value']

 if actor_json.get('name'):
-bioStr += ' ' + removeHtml(actor_json['name'])
+bioStr += ' ' + remove_html(actor_json['name'])
 if containsInvalidChars(bioStr):
 print('REJECT: post actor bio contains invalid characters')
 return False
posts.py (6 changed lines)

@@ -65,7 +65,7 @@ from utils import get_config_param
 from utils import locateNewsVotes
 from utils import locateNewsArrival
 from utils import votesOnNewswireItem
-from utils import removeHtml
+from utils import remove_html
 from utils import dangerousMarkup
 from utils import acct_dir
 from utils import local_actor_url

@@ -714,7 +714,7 @@ def _updateWordFrequency(content: str, wordFrequency: {}) -> None:
 """Creates a dictionary containing words and the number of times
 that they appear
 """
-plainText = removeHtml(content)
+plainText = remove_html(content)
 removeChars = ('.', ';', '?', '\n', ':')
 for ch in removeChars:
 plainText = plainText.replace(ch, ' ')

@@ -997,7 +997,7 @@ def _addSchedulePost(base_dir: str, nickname: str, domain: str,
 def validContentWarning(cw: str) -> str:
 """Returns a validated content warning
 """
-cw = removeHtml(cw)
+cw = remove_html(cw)
 # hashtags within content warnings apparently cause a lot of trouble
 # so remove them
 if '#' in cw:
|
@ -19,7 +19,7 @@ from utils import getDomainFromActor
|
|||
from utils import getNicknameFromActor
|
||||
from utils import getGenderFromBio
|
||||
from utils import getDisplayName
|
||||
from utils import removeHtml
|
||||
from utils import remove_html
|
||||
from utils import load_json
|
||||
from utils import save_json
|
||||
from utils import is_pgp_encrypted
|
||||
|
@ -388,7 +388,7 @@ def speakableText(base_dir: str, content: str, translate: {}) -> (str, []):
|
|||
# replace some emoji before removing html
|
||||
if ' <3' in content:
|
||||
content = content.replace(' <3', ' ' + translate['heart'])
|
||||
content = removeHtml(htmlReplaceQuoteMarks(content))
|
||||
content = remove_html(htmlReplaceQuoteMarks(content))
|
||||
detectedLinks = []
|
||||
content = speakerReplaceLinks(content, translate, detectedLinks)
|
||||
# replace all double spaces
|
||||
|
@ -426,7 +426,7 @@ def _postToSpeakerJson(base_dir: str, http_prefix: str,
|
|||
# replace some emoji before removing html
|
||||
if ' <3' in content:
|
||||
content = content.replace(' <3', ' ' + translate['heart'])
|
||||
content = removeHtml(htmlReplaceQuoteMarks(content))
|
||||
content = remove_html(htmlReplaceQuoteMarks(content))
|
||||
content = speakerReplaceLinks(content, translate, detectedLinks)
|
||||
# replace all double spaces
|
||||
while ' ' in content:
|
||||
|
|
tests.py (20 changed lines)

@@ -82,7 +82,7 @@ from utils import load_json
 from utils import save_json
 from utils import getStatusNumber
 from utils import get_followers_of_person
-from utils import removeHtml
+from utils import remove_html
 from utils import dangerousMarkup
 from utils import acct_dir
 from pgp import extractPGPPublicKey

@@ -141,7 +141,7 @@ from content import addHtmlTags
 from content import removeLongWords
 from content import replaceContentDuplicates
 from content import removeTextFormatting
-from content import removeHtmlTag
+from content import remove_htmlTag
 from theme import updateDefaultThemesList
 from theme import setCSSparam
 from theme import scanThemesForScripts

@@ -3655,17 +3655,17 @@ def _testSiteIsActive():
 def _testRemoveHtml():
 print('testRemoveHtml')
 testStr = 'This string has no html.'
-assert(removeHtml(testStr) == testStr)
+assert(remove_html(testStr) == testStr)
 testStr = 'This string <a href="1234.567">has html</a>.'
-assert(removeHtml(testStr) == 'This string has html.')
+assert(remove_html(testStr) == 'This string has html.')
 testStr = '<label>This string has.</label><label>Two labels.</label>'
-assert(removeHtml(testStr) == 'This string has. Two labels.')
+assert(remove_html(testStr) == 'This string has. Two labels.')
 testStr = '<p>This string has.</p><p>Two paragraphs.</p>'
-assert(removeHtml(testStr) == 'This string has.\n\nTwo paragraphs.')
+assert(remove_html(testStr) == 'This string has.\n\nTwo paragraphs.')
 testStr = 'This string has.<br>A new line.'
-assert(removeHtml(testStr) == 'This string has.\nA new line.')
+assert(remove_html(testStr) == 'This string has.\nA new line.')
 testStr = '<p>This string contains a url http://somesite.or.other</p>'
-assert(removeHtml(testStr) ==
+assert(remove_html(testStr) ==
 'This string contains a url http://somesite.or.other')

@@ -3998,7 +3998,7 @@ def _testRemoveHtmlTag():
 print('testRemoveHtmlTag')
 testStr = "<p><img width=\"864\" height=\"486\" " + \
 "src=\"https://somesiteorother.com/image.jpg\"></p>"
-resultStr = removeHtmlTag(testStr, 'width')
+resultStr = remove_htmlTag(testStr, 'width')
 assert resultStr == "<p><img height=\"486\" " + \
 "src=\"https://somesiteorother.com/image.jpg\"></p>"

@@ -5622,7 +5622,7 @@ def _testGetLinksFromContent():
 assert links.get('another')
 assert links['another'] == link2

-contentPlain = '<p>' + removeHtml(content) + '</p>'
+contentPlain = '<p>' + remove_html(content) + '</p>'
 assert '>@linked</a>' not in contentPlain
 content = addLinksToContent(contentPlain, links)
 assert '>@linked</a>' in content
utils.py (6 changed lines)

@@ -440,7 +440,7 @@ def getMediaFormats() -> str:
 return mediaFormats


-def removeHtml(content: str) -> str:
+def remove_html(content: str) -> str:
 """Removes html links from the given content.
 Used to ensure that profile descriptions don't contain dubious content
 """

@@ -479,11 +479,11 @@ def firstParagraphFromString(content: str) -> str:
 to be used as a summary in the newswire feed
 """
 if '<p>' not in content or '</p>' not in content:
-return removeHtml(content)
+return remove_html(content)
 paragraph = content.split('<p>')[1]
 if '</p>' in paragraph:
 paragraph = paragraph.split('</p>')[0]
-return removeHtml(paragraph)
+return remove_html(paragraph)


 def is_system_account(nickname: str) -> bool:
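For reference, only the name of the helper changes; its contract stays the same. The expected behaviour below is taken from the assertions in the tests.py hunk earlier in this commit:

    from utils import remove_html

    remove_html('This string has no html.')
    # -> 'This string has no html.'
    remove_html('This string <a href="1234.567">has html</a>.')
    # -> 'This string has html.'
    remove_html('<p>This string has.</p><p>Two paragraphs.</p>')
    # -> 'This string has.\n\nTwo paragraphs.'
    remove_html('This string has.<br>A new line.')
    # -> 'This string has.\nA new line.'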
|
@ -13,7 +13,7 @@ from content import removeLongWords
|
|||
from content import limitRepeatedWords
|
||||
from utils import get_fav_filename_from_url
|
||||
from utils import get_base_content_from_post
|
||||
from utils import removeHtml
|
||||
from utils import remove_html
|
||||
from utils import locate_post
|
||||
from utils import load_json
|
||||
from utils import votesOnNewswireItem
|
||||
|
@ -221,7 +221,7 @@ def _htmlNewswire(base_dir: str, newswire: {}, nickname: str, moderator: bool,
|
|||
separatorStr = htmlPostSeparator(base_dir, 'right')
|
||||
htmlStr = ''
|
||||
for dateStr, item in newswire.items():
|
||||
item[0] = removeHtml(item[0]).strip()
|
||||
item[0] = remove_html(item[0]).strip()
|
||||
if not item[0]:
|
||||
continue
|
||||
# remove any CDATA
|
||||
|
@ -408,7 +408,7 @@ def htmlCitations(base_dir: str, nickname: str, domain: str,
|
|||
if newswire:
|
||||
ctr = 0
|
||||
for dateStr, item in newswire.items():
|
||||
item[0] = removeHtml(item[0]).strip()
|
||||
item[0] = remove_html(item[0]).strip()
|
||||
if not item[0]:
|
||||
continue
|
||||
# remove any CDATA
|
||||
|
|
|
@@ -15,7 +15,7 @@ from posts import isModerator
 from utils import get_full_domain
 from utils import get_config_param
 from utils import is_dormant
-from utils import removeHtml
+from utils import remove_html
 from utils import getDomainFromActor
 from utils import getNicknameFromActor
 from utils import is_featured_writer

@@ -191,51 +191,51 @@ def htmlPersonOptions(defaultTimeline: str,
 optionsStr += \
 '<p class="imText">' + translate['Email'] + \
 ': <a href="mailto:' + \
-emailAddress + '">' + removeHtml(emailAddress) + '</a></p>\n'
+emailAddress + '">' + remove_html(emailAddress) + '</a></p>\n'
 if xmppAddress:
 optionsStr += \
 '<p class="imText">' + translate['XMPP'] + \
-': <a href="xmpp:' + removeHtml(xmppAddress) + '">' + \
+': <a href="xmpp:' + remove_html(xmppAddress) + '">' + \
 xmppAddress + '</a></p>\n'
 if matrixAddress:
 optionsStr += \
 '<p class="imText">' + translate['Matrix'] + ': ' + \
-removeHtml(matrixAddress) + '</p>\n'
+remove_html(matrixAddress) + '</p>\n'
 if ssbAddress:
 optionsStr += \
-'<p class="imText">SSB: ' + removeHtml(ssbAddress) + '</p>\n'
+'<p class="imText">SSB: ' + remove_html(ssbAddress) + '</p>\n'
 if blogAddress:
 optionsStr += \
 '<p class="imText">Blog: <a href="' + \
-removeHtml(blogAddress) + '">' + \
-removeHtml(blogAddress) + '</a></p>\n'
+remove_html(blogAddress) + '">' + \
+remove_html(blogAddress) + '</a></p>\n'
 if toxAddress:
 optionsStr += \
-'<p class="imText">Tox: ' + removeHtml(toxAddress) + '</p>\n'
+'<p class="imText">Tox: ' + remove_html(toxAddress) + '</p>\n'
 if briarAddress:
 if briarAddress.startswith('briar://'):
 optionsStr += \
 '<p class="imText">' + \
-removeHtml(briarAddress) + '</p>\n'
+remove_html(briarAddress) + '</p>\n'
 else:
 optionsStr += \
 '<p class="imText">briar://' + \
-removeHtml(briarAddress) + '</p>\n'
+remove_html(briarAddress) + '</p>\n'
 if jamiAddress:
 optionsStr += \
-'<p class="imText">Jami: ' + removeHtml(jamiAddress) + '</p>\n'
+'<p class="imText">Jami: ' + remove_html(jamiAddress) + '</p>\n'
 if cwtchAddress:
 optionsStr += \
-'<p class="imText">Cwtch: ' + removeHtml(cwtchAddress) + '</p>\n'
+'<p class="imText">Cwtch: ' + remove_html(cwtchAddress) + '</p>\n'
 if EnigmaPubKey:
 optionsStr += \
-'<p class="imText">Enigma: ' + removeHtml(EnigmaPubKey) + '</p>\n'
+'<p class="imText">Enigma: ' + remove_html(EnigmaPubKey) + '</p>\n'
 if PGPfingerprint:
 optionsStr += '<p class="pgp">PGP: ' + \
-removeHtml(PGPfingerprint).replace('\n', '<br>') + '</p>\n'
+remove_html(PGPfingerprint).replace('\n', '<br>') + '</p>\n'
 if PGPpubKey:
 optionsStr += '<p class="pgp">' + \
-removeHtml(PGPpubKey).replace('\n', '<br>') + '</p>\n'
+remove_html(PGPpubKey).replace('\n', '<br>') + '</p>\n'
 optionsStr += ' <form method="POST" action="' + \
 originPathStr + '/personoptions">\n'
 optionsStr += ' <input type="hidden" name="pageNumber" value="' + \
@@ -24,7 +24,7 @@ from posts import getPersonBox
 from posts import downloadAnnounce
 from posts import populateRepliesJson
 from utils import removeHashFromPostId
-from utils import removeHtml
+from utils import remove_html
 from utils import get_actor_languages_list
 from utils import get_base_content_from_post
 from utils import get_content_from_post

@@ -114,7 +114,7 @@ def _htmlPostMetadataOpenGraph(domain: str, post_json_object: {}) -> str:
 "\" property=\"og:published_time\" />\n"
 if not objJson.get('attachment') or objJson.get('sensitive'):
 if objJson.get('content') and not objJson.get('sensitive'):
-description = removeHtml(objJson['content'])
+description = remove_html(objJson['content'])
 metadata += \
 " <meta content=\"" + description + \
 "\" name=\"description\">\n"

@@ -142,7 +142,7 @@ def _htmlPostMetadataOpenGraph(domain: str, post_json_object: {}) -> str:
 description = 'Attached: 1 audio'
 if description:
 if objJson.get('content') and not objJson.get('sensitive'):
-description += '\n\n' + removeHtml(objJson['content'])
+description += '\n\n' + remove_html(objJson['content'])
 metadata += \
 " <meta content=\"" + description + \
 "\" name=\"description\">\n"
@@ -21,7 +21,7 @@ from utils import is_dormant
 from utils import getNicknameFromActor
 from utils import getDomainFromActor
 from utils import is_system_account
-from utils import removeHtml
+from utils import remove_html
 from utils import load_json
 from utils import get_config_param
 from utils import get_image_formats

@@ -242,7 +242,7 @@ def htmlProfileAfterSearch(cssCache: {},
 avatarDescription = avatarDescription.replace('<p>', '')
 avatarDescription = avatarDescription.replace('</p>', '')
 if '<' in avatarDescription:
-avatarDescription = removeHtml(avatarDescription)
+avatarDescription = remove_html(avatarDescription)

 imageUrl = ''
 if profile_json.get('image'):
@@ -12,7 +12,7 @@ from shutil import copyfile
 from collections import OrderedDict
 from session import getJson
 from utils import is_account_dir
-from utils import removeHtml
+from utils import remove_html
 from utils import getProtocolPrefixes
 from utils import load_json
 from utils import get_cached_post_filename

@@ -230,7 +230,7 @@ def _setActorPropertyUrl(actor_json: {}, property_name: str, url: str) -> None:
 def setBlogAddress(actor_json: {}, blogAddress: str) -> None:
 """Sets an blog address for the given actor
 """
-_setActorPropertyUrl(actor_json, 'Blog', removeHtml(blogAddress))
+_setActorPropertyUrl(actor_json, 'Blog', remove_html(blogAddress))


 def updateAvatarImageCache(signing_priv_key_pem: str,

@@ -650,8 +650,8 @@ def htmlHeaderWithPersonMarkup(cssFilename: str, instanceTitle: str,
 firstEntry = False
 skillsMarkup += '\n ],\n'

-description = removeHtml(actor_json['summary'])
-nameStr = removeHtml(actor_json['name'])
+description = remove_html(actor_json['summary'])
+nameStr = remove_html(actor_json['name'])
 domain_full = actor_json['id'].split('://')[1].split('/')[0]
 handle = actor_json['preferredUsername'] + '@' + domain_full

@@ -689,7 +689,7 @@ def htmlHeaderWithPersonMarkup(cssFilename: str, instanceTitle: str,
 ' }\n' + \
 ' </script>\n'

-description = removeHtml(description)
+description = remove_html(description)
 ogMetadata = \
 " <meta content=\"profile\" property=\"og:type\" />\n" + \
 " <meta content=\"" + description + \
@@ -10,7 +10,7 @@ __module_group__ = "Onboarding"
 import os
 from shutil import copyfile
 from utils import get_config_param
-from utils import removeHtml
+from utils import remove_html
 from utils import acct_dir
 from webapp_utils import htmlHeaderWithExternalStyle
 from webapp_utils import htmlFooter

@@ -79,7 +79,7 @@ def htmlWelcomeScreen(base_dir: str, nickname: str,
 with open(welcomeFilename, 'r') as welcomeFile:
 welcomeText = welcomeFile.read()
 welcomeText = welcomeText.replace('INSTANCE', instanceTitle)
-welcomeText = markdownToHtml(removeHtml(welcomeText))
+welcomeText = markdownToHtml(remove_html(welcomeText))

 welcomeForm = ''
 cssFilename = base_dir + '/epicyon-welcome.css'
@@ -9,7 +9,7 @@ __module_group__ = "Onboarding"
 import os
 from shutil import copyfile
-from utils import removeHtml
+from utils import remove_html
 from utils import get_config_param
 from webapp_utils import htmlHeaderWithExternalStyle
 from webapp_utils import htmlFooter

@@ -54,7 +54,7 @@ def htmlWelcomeFinal(base_dir: str, nickname: str, domain: str,
 with open(finalFilename, 'r') as finalFile:
 finalText = finalFile.read()
 finalText = finalText.replace('INSTANCE', instanceTitle)
-finalText = markdownToHtml(removeHtml(finalText))
+finalText = markdownToHtml(remove_html(finalText))

 finalForm = ''
 cssFilename = base_dir + '/epicyon-welcome.css'
@@ -9,7 +9,7 @@ __module_group__ = "Onboarding"
 import os
 from shutil import copyfile
-from utils import removeHtml
+from utils import remove_html
 from utils import load_json
 from utils import get_config_param
 from utils import get_image_extensions

@@ -60,7 +60,7 @@ def htmlWelcomeProfile(base_dir: str, nickname: str, domain: str,
 with open(profileFilename, 'r') as profileFile:
 profileText = profileFile.read()
 profileText = profileText.replace('INSTANCE', instanceTitle)
-profileText = markdownToHtml(removeHtml(profileText))
+profileText = markdownToHtml(remove_html(profileText))

 profileForm = ''
 cssFilename = base_dir + '/epicyon-welcome.css'