mirror of https://gitlab.com/bashrc2/epicyon
Merge branch 'main' of ssh://code.freedombone.net:2222/bashrc/epicyon into main
commit 7b05b6172d
blog.py (9 changes)
@@ -15,6 +15,7 @@ from webapp import htmlHeaderWithExternalStyle
 from webapp import htmlFooter
 from webapp_media import addEmbeddedElements
 from webapp_utils import getPostAttachmentsAsHtml
+from utils import getMediaFormats
 from utils import getNicknameFromActor
 from utils import getDomainFromActor
 from utils import locatePost
@@ -724,12 +725,11 @@ def htmlEditBlog(mediaInstance: bool, translate: {},
     iconsPath = getIconsWebPath(baseDir)

-    editBlogText = '<p class="new-post-text">' + \
-        translate['Write your post text below.'] + '</p>'
+    editBlogText = '<h1>' + translate['Write your post text below.'] + '</h1>'

     if os.path.isfile(baseDir + '/accounts/newpost.txt'):
         with open(baseDir + '/accounts/newpost.txt', 'r') as file:
-            editBlogText = '<p class="new-post-text">' + file.read() + '</p>'
+            editBlogText = '<p>' + file.read() + '</p>'

     cssFilename = baseDir + '/epicyon-profile.css'
     if os.path.isfile(baseDir + '/epicyon.css'):
@@ -746,8 +746,7 @@ def htmlEditBlog(mediaInstance: bool, translate: {},
     editBlogImageSection += \
         ' <input type="file" id="attachpic" name="attachpic"'
     editBlogImageSection += \
-        ' accept=".png, .jpg, .jpeg, .gif, .webp, .avif, ' + \
-        '.mp4, .webm, .ogv, .mp3, .ogg">'
+        ' accept="' + getMediaFormats() + '">'
     editBlogImageSection += ' </div>'

     placeholderMessage = translate['Write something'] + '...'

@@ -9,6 +9,7 @@ __status__ = "Production"
 import os
 import email.parser
 from shutil import copyfile
+from utils import getImageExtensions
 from utils import loadJson
 from utils import fileLastModified
 from utils import getLinkPrefixes
@@ -939,7 +940,7 @@ def saveMediaInFormPOST(mediaBytes, debug: bool,
             break

     # remove any existing image files with a different format
-    extensionTypes = ('png', 'jpg', 'jpeg', 'gif', 'webp', 'avif')
+    extensionTypes = getImageExtensions()
     for ex in extensionTypes:
         if ex == detectedExtension:
             continue

daemon.py (10 changes)
@@ -166,6 +166,7 @@ from shares import getSharesFeedForPerson
 from shares import addShare
 from shares import removeShare
 from shares import expireShares
+from utils import getImageExtensions
 from utils import mediaFileMimeType
 from utils import getCSS
 from utils import firstParagraphFromString
@@ -8412,7 +8413,8 @@ class PubServer(BaseHTTPRequestHandler):
                         GETstartTime, GETtimings: {}) -> bool:
         """Show a background image
        """
-        for ext in ('webp', 'gif', 'jpg', 'png', 'avif'):
+        imageExtensions = getImageExtensions()
+        for ext in imageExtensions:
             for bg in ('follow', 'options', 'login'):
                 # follow screen background image
                 if path.endswith('/' + bg + '-background.' + ext):
@@ -12386,7 +12388,8 @@ def loadTokens(baseDir: str, tokensDict: {}, tokensLookup: {}) -> None:
             tokensLookup[token] = nickname


-def runDaemon(allowLocalNetworkAccess: bool,
+def runDaemon(maxNewswirePosts: int,
+              allowLocalNetworkAccess: bool,
              maxFeedItemSizeKb: int,
              publishButtonAtTop: bool,
              rssIconAtTop: bool,
@@ -12461,6 +12464,9 @@ def runDaemon(allowLocalNetworkAccess: bool,
     # newswire storing rss feeds
     httpd.newswire = {}

+    # maximum number of posts to appear in the newswire on the right column
+    httpd.maxNewswirePosts = maxNewswirePosts
+
     # This counter is used to update the list of blocked domains in memory.
     # It helps to avoid touching the disk and so improves flooding resistance
     httpd.blocklistUpdateCtr = 0

@@ -66,6 +66,7 @@
   --column-right-width: 10vw;
   --banner-height: 15vh;
   --banner-height-mobile: 10vh;
+  --header-font: 'Arial, Helvetica, sans-serif';
 }

 @font-face {
@@ -129,10 +130,6 @@ blockquote p {
     border: 2px solid var(--focus-color);
 }

-h1 {
-    color: var(--title-color);
-}
-
 a, u {
     color: var(--main-fg-color);
 }
@@ -214,10 +211,9 @@ a:focus {
     transform: translateY(30%) scaleX(-1);
 }

-.new-post-text {
-    font-size: var(--font-size2);
-    font-family: Arial, Helvetica, sans-serif;
-    padding: 4px 0;
+h1 {
+    font-family: var(--header-font);
+    color: var(--title-color);
 }

 .new-post-subtext {

epicyon.py (13 changes)
@@ -116,6 +116,10 @@ parser.add_argument('--postsPerSource',
                     dest='maxNewswirePostsPerSource', type=int,
                     default=4,
                     help='Maximum newswire posts per feed or account')
+parser.add_argument('--maxNewswirePosts',
+                    dest='maxNewswirePosts', type=int,
+                    default=20,
+                    help='Maximum newswire posts in the right column')
 parser.add_argument('--maxFeedSize',
                     dest='maxNewswireFeedSizeKb', type=int,
                     default=10240,
@@ -2001,6 +2005,12 @@ maxNewswirePostsPerSource = \
 if maxNewswirePostsPerSource:
     args.maxNewswirePostsPerSource = int(maxNewswirePostsPerSource)

+# set the maximum number of newswire posts appearing in the right column
+maxNewswirePosts = \
+    getConfigParam(baseDir, 'maxNewswirePosts')
+if maxNewswirePosts:
+    args.maxNewswirePosts = int(maxNewswirePosts)
+
 # set the maximum size of a newswire rss/atom feed in Kilobytes
 maxNewswireFeedSizeKb = \
     getConfigParam(baseDir, 'maxNewswireFeedSizeKb')
@@ -2075,7 +2085,8 @@ if setTheme(baseDir, themeName, domain, args.allowLocalNetworkAccess):
     print('Theme set to ' + themeName)

 if __name__ == "__main__":
-    runDaemon(args.allowLocalNetworkAccess,
+    runDaemon(args.maxNewswirePosts,
+              args.allowLocalNetworkAccess,
              args.maxFeedItemSizeKb,
              args.publishButtonAtTop,
              args.rssIconAtTop,

media.py (17 changes)
@@ -13,6 +13,10 @@ import os
 import datetime
 from hashlib import sha1
 from auth import createPassword
+from utils import getImageExtensions
+from utils import getVideoExtensions
+from utils import getAudioExtensions
+from utils import getMediaExtensions
 from shutil import copyfile
 from shutil import rmtree
 from shutil import move
@@ -56,8 +60,7 @@ def getImageHash(imageFilename: str) -> str:


 def isMedia(imageFilename: str) -> bool:
-    permittedMedia = ('png', 'jpg', 'gif', 'webp', 'avif',
-                      'mp4', 'ogv', 'mp3', 'ogg')
+    permittedMedia = getMediaExtensions()
     for m in permittedMedia:
         if imageFilename.endswith('.' + m):
             return True
@@ -83,16 +86,15 @@ def getAttachmentMediaType(filename: str) -> str:
     image, video or audio
     """
     mediaType = None
-    imageTypes = ('png', 'jpg', 'jpeg',
-                  'gif', 'webp', 'avif')
+    imageTypes = getImageExtensions()
     for mType in imageTypes:
         if filename.endswith('.' + mType):
             return 'image'
-    videoTypes = ('mp4', 'webm', 'ogv')
+    videoTypes = getVideoExtensions()
     for mType in videoTypes:
         if filename.endswith('.' + mType):
             return 'video'
-    audioTypes = ('mp3', 'ogg')
+    audioTypes = getAudioExtensions()
     for mType in audioTypes:
         if filename.endswith('.' + mType):
             return 'audio'
@@ -143,8 +145,7 @@ def attachMedia(baseDir: str, httpPrefix: str, domain: str, port: int,
         return postJson

     fileExtension = None
-    acceptedTypes = ('png', 'jpg', 'gif', 'webp', 'avif',
-                     'mp4', 'webm', 'ogv', 'mp3', 'ogg')
+    acceptedTypes = getMediaExtensions()
     for mType in acceptedTypes:
         if imageFilename.endswith('.' + mType):
             if mType == 'jpg':

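
For reference, a short usage sketch of the refactored getAttachmentMediaType, behaving the same as before the extension tuples were centralised (the file names are made-up placeholders):

from media import getAttachmentMediaType

print(getAttachmentMediaType('photo.png'))   # image
print(getAttachmentMediaType('clip.webm'))   # video
print(getAttachmentMediaType('track.ogg'))   # audio
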
@@ -711,18 +711,13 @@ def runNewswireDaemon(baseDir: str, httpd,
             print('Newswire daemon session established')

         # try to update the feeds
-        newNewswire = None
-        try:
-            newNewswire = \
-                getDictFromNewswire(httpd.session, baseDir, domain,
-                                    httpd.maxNewswirePostsPerSource,
-                                    httpd.maxNewswireFeedSizeKb,
-                                    httpd.maxTags,
-                                    httpd.maxFeedItemSizeKb)
-        except Exception as e:
-            print('WARN: unable to update newswire ' + str(e))
-            time.sleep(120)
-            continue
+        newNewswire = \
+            getDictFromNewswire(httpd.session, baseDir, domain,
+                                httpd.maxNewswirePostsPerSource,
+                                httpd.maxNewswireFeedSizeKb,
+                                httpd.maxTags,
+                                httpd.maxFeedItemSizeKb,
+                                httpd.maxNewswirePosts)

         if not httpd.newswire:
             if os.path.isfile(newswireStateFilename):

newswire.py (294 changes)
@@ -11,6 +11,8 @@ import requests
 from socket import error as SocketError
 import errno
 from datetime import datetime
+from datetime import timedelta
+from datetime import timezone
 from collections import OrderedDict
 from utils import firstParagraphFromString
 from utils import isPublicPost
@@ -25,6 +27,16 @@ from blocking import isBlockedHashtag
 from filters import isFiltered


+def removeCDATA(text: str) -> str:
+    """Removes any CDATA from the given text
+    """
+    if 'CDATA[' in text:
+        text = text.split('CDATA[')[1]
+        if ']' in text:
+            text = text.split(']')[0]
+    return text
+
+
 def rss2Header(httpPrefix: str,
                nickname: str, domainFull: str,
                title: str, translate: {}) -> str:
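
A minimal usage sketch of the new removeCDATA helper, showing how '<![CDATA[...]]>' wrappers are stripped from feed titles and descriptions (the sample headline is made up):

from newswire import removeCDATA

assert removeCDATA('<![CDATA[Breaking news headline]]>') == 'Breaking news headline'
# text without a CDATA marker passes through unchanged
assert removeCDATA('Plain headline') == 'Plain headline'
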
@@ -125,6 +137,71 @@ def addNewswireDictEntry(baseDir: str, domain: str,
     ]


+def parseFeedDate(pubDate: str) -> str:
+    """Returns a UTC date string based on the given date string
+    This tries a number of formats to see which work
+    """
+    formats = ("%a, %d %b %Y %H:%M:%S %z",
+               "%a, %d %b %Y %H:%M:%S EST",
+               "%a, %d %b %Y %H:%M:%S UT",
+               "%Y-%m-%dT%H:%M:%SZ",
+               "%Y-%m-%dT%H:%M:%S%z")
+
+    publishedDate = None
+    for dateFormat in formats:
+        if ',' in pubDate and ',' not in dateFormat:
+            continue
+        if ',' not in pubDate and ',' in dateFormat:
+            continue
+        if '-' in pubDate and '-' not in dateFormat:
+            continue
+        if '-' not in pubDate and '-' in dateFormat:
+            continue
+        if 'T' in pubDate and 'T' not in dateFormat:
+            continue
+        if 'T' not in pubDate and 'T' in dateFormat:
+            continue
+        if 'Z' in pubDate and 'Z' not in dateFormat:
+            continue
+        if 'Z' not in pubDate and 'Z' in dateFormat:
+            continue
+        if 'EST' not in pubDate and 'EST' in dateFormat:
+            continue
+        if 'EST' in pubDate and 'EST' not in dateFormat:
+            continue
+        if 'UT' not in pubDate and 'UT' in dateFormat:
+            continue
+        if 'UT' in pubDate and 'UT' not in dateFormat:
+            continue
+
+        try:
+            publishedDate = \
+                datetime.strptime(pubDate, dateFormat)
+        except BaseException:
+            print('WARN: unrecognized date format: ' +
+                  pubDate + ' ' + dateFormat)
+            continue
+
+        if publishedDate:
+            if pubDate.endswith(' EST'):
+                hoursAdded = timedelta(hours=5)
+                publishedDate = publishedDate + hoursAdded
+            break
+
+    pubDateStr = None
+    if publishedDate:
+        offset = publishedDate.utcoffset()
+        if offset:
+            publishedDate = publishedDate - offset
+        # convert local date to UTC
+        publishedDate = publishedDate.replace(tzinfo=timezone.utc)
+        pubDateStr = str(publishedDate)
+        if not pubDateStr.endswith('+00:00'):
+            pubDateStr += '+00:00'
+
+    return pubDateStr
+
+
 def xml2StrToDict(baseDir: str, domain: str, xmlStr: str,
                   moderated: bool, mirrored: bool,
                   maxPostsPerSource: int,
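
A short usage sketch of parseFeedDate, using the same example dates as the new testParseFeedDate case in tests.py: atom-style dates and RSS 2.0 dates with offsets are both normalized to a UTC string.

from newswire import parseFeedDate

assert parseFeedDate("2020-08-27T16:12:34+00:00") == "2020-08-27 16:12:34+00:00"
# an RSS 2.0 date with a +0100 offset is shifted back to UTC
assert parseFeedDate("Sun, 22 Nov 2020 19:51:33 +0100") == "2020-11-22 18:51:33+00:00"
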
@@ -154,11 +231,17 @@ def xml2StrToDict(baseDir: str, domain: str, xmlStr: str,
         if '</pubDate>' not in rssItem:
             continue
         title = rssItem.split('<title>')[1]
-        title = title.split('</title>')[0]
+        title = removeCDATA(title.split('</title>')[0])
         description = ''
         if '<description>' in rssItem and '</description>' in rssItem:
             description = rssItem.split('<description>')[1]
-            description = description.split('</description>')[0]
+            description = removeCDATA(description.split('</description>')[0])
+        else:
+            if '<media:description>' in rssItem and \
+               '</media:description>' in rssItem:
+                description = rssItem.split('<media:description>')[1]
+                description = description.split('</media:description>')[0]
+                description = removeCDATA(description)
         link = rssItem.split('<link>')[1]
         link = link.split('</link>')[0]
         if '://' not in link:
@@ -170,42 +253,19 @@ def xml2StrToDict(baseDir: str, domain: str, xmlStr: str,
             continue
         pubDate = rssItem.split('<pubDate>')[1]
         pubDate = pubDate.split('</pubDate>')[0]
-        parsed = False
-        try:
-            publishedDate = \
-                datetime.strptime(pubDate, "%a, %d %b %Y %H:%M:%S %z")
+
+        pubDateStr = parseFeedDate(pubDate)
+        if pubDateStr:
             postFilename = ''
             votesStatus = []
             addNewswireDictEntry(baseDir, domain,
-                                 result, str(publishedDate),
+                                 result, pubDateStr,
                                  title, link,
                                  votesStatus, postFilename,
                                  description, moderated, mirrored)
             postCtr += 1
             if postCtr >= maxPostsPerSource:
                 break
-            parsed = True
-        except BaseException:
-            pass
-        if not parsed:
-            try:
-                publishedDate = \
-                    datetime.strptime(pubDate, "%a, %d %b %Y %H:%M:%S UT")
-                postFilename = ''
-                votesStatus = []
-                addNewswireDictEntry(baseDir, domain,
-                                     result,
-                                     str(publishedDate) + '+00:00',
-                                     title, link,
-                                     votesStatus, postFilename,
-                                     description, moderated, mirrored)
-                postCtr += 1
-                if postCtr >= maxPostsPerSource:
-                    break
-                parsed = True
-            except BaseException:
-                print('WARN: unrecognized RSS date format: ' + pubDate)
-                pass
     return result


@@ -218,32 +278,38 @@ def atomFeedToDict(baseDir: str, domain: str, xmlStr: str,
     if '<entry>' not in xmlStr:
         return {}
     result = {}
-    rssItems = xmlStr.split('<entry>')
+    atomItems = xmlStr.split('<entry>')
     postCtr = 0
     maxBytes = maxFeedItemSizeKb * 1024
-    for rssItem in rssItems:
-        if len(rssItem) > maxBytes:
+    for atomItem in atomItems:
+        if len(atomItem) > maxBytes:
             print('WARN: atom feed item is too big')
             continue
-        if '<title>' not in rssItem:
+        if '<title>' not in atomItem:
             continue
-        if '</title>' not in rssItem:
+        if '</title>' not in atomItem:
             continue
-        if '<link>' not in rssItem:
+        if '<link>' not in atomItem:
             continue
-        if '</link>' not in rssItem:
+        if '</link>' not in atomItem:
             continue
-        if '<updated>' not in rssItem:
+        if '<updated>' not in atomItem:
             continue
-        if '</updated>' not in rssItem:
+        if '</updated>' not in atomItem:
             continue
-        title = rssItem.split('<title>')[1]
-        title = title.split('</title>')[0]
+        title = atomItem.split('<title>')[1]
+        title = removeCDATA(title.split('</title>')[0])
         description = ''
-        if '<summary>' in rssItem and '</summary>' in rssItem:
-            description = rssItem.split('<summary>')[1]
-            description = description.split('</summary>')[0]
-        link = rssItem.split('<link>')[1]
+        if '<summary>' in atomItem and '</summary>' in atomItem:
+            description = atomItem.split('<summary>')[1]
+            description = removeCDATA(description.split('</summary>')[0])
+        else:
+            if '<media:description>' in atomItem and \
+               '</media:description>' in atomItem:
+                description = atomItem.split('<media:description>')[1]
+                description = description.split('</media:description>')[0]
+                description = removeCDATA(description)
+        link = atomItem.split('<link>')[1]
         link = link.split('</link>')[0]
         if '://' not in link:
             continue
@@ -252,43 +318,85 @@ def atomFeedToDict(baseDir: str, domain: str, xmlStr: str,
         itemDomain = itemDomain.split('/')[0]
         if isBlockedDomain(baseDir, itemDomain):
             continue
-        pubDate = rssItem.split('<updated>')[1]
+        pubDate = atomItem.split('<updated>')[1]
         pubDate = pubDate.split('</updated>')[0]
-        parsed = False
-        try:
-            publishedDate = \
-                datetime.strptime(pubDate, "%Y-%m-%dT%H:%M:%SZ")
+
+        pubDateStr = parseFeedDate(pubDate)
+        if pubDateStr:
             postFilename = ''
             votesStatus = []
             addNewswireDictEntry(baseDir, domain,
-                                 result, str(publishedDate),
+                                 result, pubDateStr,
                                  title, link,
                                  votesStatus, postFilename,
                                  description, moderated, mirrored)
             postCtr += 1
             if postCtr >= maxPostsPerSource:
                 break
+    return result
+
+
+def atomFeedYTToDict(baseDir: str, domain: str, xmlStr: str,
+                     moderated: bool, mirrored: bool,
+                     maxPostsPerSource: int,
+                     maxFeedItemSizeKb: int) -> {}:
+    """Converts an atom-style YouTube feed string to a dictionary
+    """
+    if '<entry>' not in xmlStr:
+        return {}
+    if isBlockedDomain(baseDir, 'www.youtube.com'):
+        return {}
+    result = {}
+    atomItems = xmlStr.split('<entry>')
+    postCtr = 0
+    maxBytes = maxFeedItemSizeKb * 1024
+    for atomItem in atomItems:
+        print('YouTube feed item: ' + atomItem)
+        if len(atomItem) > maxBytes:
+            print('WARN: atom feed item is too big')
+            continue
+        if '<title>' not in atomItem:
+            continue
+        if '</title>' not in atomItem:
+            continue
+        if '<updated>' not in atomItem:
+            continue
+        if '</updated>' not in atomItem:
+            continue
+        if '<yt:videoId>' not in atomItem:
+            continue
+        if '</yt:videoId>' not in atomItem:
+            continue
+        title = atomItem.split('<title>')[1]
+        title = removeCDATA(title.split('</title>')[0])
+        description = ''
+        if '<media:description>' in atomItem and \
+           '</media:description>' in atomItem:
+            description = atomItem.split('<media:description>')[1]
+            description = description.split('</media:description>')[0]
+            description = removeCDATA(description)
+        elif '<summary>' in atomItem and '</summary>' in atomItem:
+            description = atomItem.split('<summary>')[1]
+            description = description.split('</summary>')[0]
+            description = removeCDATA(description)
+        link = atomItem.split('<yt:videoId>')[1]
+        link = link.split('</yt:videoId>')[0]
+        link = 'https://www.youtube.com/watch?v=' + link.strip()
+        pubDate = atomItem.split('<updated>')[1]
+        pubDate = pubDate.split('</updated>')[0]
+
+        pubDateStr = parseFeedDate(pubDate)
+        if pubDateStr:
+            postFilename = ''
+            votesStatus = []
+            addNewswireDictEntry(baseDir, domain,
+                                 result, pubDateStr,
+                                 title, link,
+                                 votesStatus, postFilename,
+                                 description, moderated, mirrored)
+            postCtr += 1
+            if postCtr >= maxPostsPerSource:
+                break
-            parsed = True
-        except BaseException:
-            pass
-        if not parsed:
-            try:
-                publishedDate = \
-                    datetime.strptime(pubDate, "%a, %d %b %Y %H:%M:%S UT")
-                postFilename = ''
-                votesStatus = []
-                addNewswireDictEntry(baseDir, domain, result,
-                                     str(publishedDate) + '+00:00',
-                                     title, link,
-                                     votesStatus, postFilename,
-                                     description, moderated, mirrored)
-                postCtr += 1
-                if postCtr >= maxPostsPerSource:
-                    break
-                parsed = True
-            except BaseException:
-                print('WARN: unrecognized atom feed date format: ' + pubDate)
-                pass
     return result
@@ -298,7 +406,12 @@ def xmlStrToDict(baseDir: str, domain: str, xmlStr: str,
                  maxFeedItemSizeKb: int) -> {}:
     """Converts an xml string to a dictionary
     """
-    if 'rss version="2.0"' in xmlStr:
+    if '<yt:videoId>' in xmlStr and '<yt:channelId>' in xmlStr:
+        print('YouTube feed: reading')
+        return atomFeedYTToDict(baseDir, domain,
+                                xmlStr, moderated, mirrored,
+                                maxPostsPerSource, maxFeedItemSizeKb)
+    elif 'rss version="2.0"' in xmlStr:
         return xml2StrToDict(baseDir, domain,
                              xmlStr, moderated, mirrored,
                              maxPostsPerSource, maxFeedItemSizeKb)
@@ -309,6 +422,18 @@ def xmlStrToDict(baseDir: str, domain: str, xmlStr: str,
     return {}


+def YTchannelToAtomFeed(url: str) -> str:
+    """Converts a YouTube channel url into an atom feed url
+    """
+    if 'youtube.com/channel/' not in url:
+        return url
+    channelId = url.split('youtube.com/channel/')[1].strip()
+    channelUrl = \
+        'https://www.youtube.com/feeds/videos.xml?channel_id=' + channelId
+    print('YouTube feed: ' + channelUrl)
+    return channelUrl
+
+
 def getRSS(baseDir: str, domain: str, session, url: str,
            moderated: bool, mirrored: bool,
            maxPostsPerSource: int, maxFeedSizeKb: int,
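
A usage sketch for the new YTchannelToAtomFeed helper (the channel id below is a made-up placeholder):

from newswire import YTchannelToAtomFeed

# a subscribed YouTube channel url is rewritten to its atom feed url
print(YTchannelToAtomFeed('https://www.youtube.com/channel/UC1234567890abcdef'))
# https://www.youtube.com/feeds/videos.xml?channel_id=UC1234567890abcdef

# anything else passes through untouched
print(YTchannelToAtomFeed('https://example.com/feed.rss'))
# https://example.com/feed.rss
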
@@ -333,6 +458,7 @@ def getRSS(baseDir: str, domain: str, session, url: str,
         'Mozilla/5.0 (X11; Linux x86_64; rv:81.0) Gecko/20100101 Firefox/81.0'
     if not session:
         print('WARN: no session specified for getRSS')
+    url = YTchannelToAtomFeed(url)
     try:
         result = session.get(url, headers=sessionHeaders, params=sessionParams)
         if result:
@@ -343,7 +469,10 @@ def getRSS(baseDir: str, domain: str, session, url: str,
                                    maxPostsPerSource,
                                    maxFeedItemSizeKb)
             else:
-                print('WARN: feed is too large: ' + url)
+                print('WARN: feed is too large, ' +
+                      'or contains invalid characters: ' + url)
         else:
             print('WARN: no result returned for feed ' + url)
     except requests.exceptions.RequestException as e:
         print('ERROR: getRSS failed\nurl: ' + str(url) + '\n' +
               'headers: ' + str(sessionHeaders) + '\n' +
@@ -387,7 +516,7 @@ def getRSSfromDict(baseDir: str, newswire: {},
             continue
         rssStr += '<item>\n'
         rssStr += ' <title>' + fields[0] + '</title>\n'
-        description = firstParagraphFromString(fields[4])
+        description = removeCDATA(firstParagraphFromString(fields[4]))
         rssStr += ' <description>' + description + '</description>\n'
         url = fields[1]
         if '://' not in url:
@@ -507,6 +636,7 @@ def addAccountBlogsToNewswire(baseDir: str, nickname: str, domain: str,
             votes = loadJson(fullPostFilename + '.votes')
         content = postJsonObject['object']['content']
         description = firstParagraphFromString(content)
+        description = removeCDATA(description)
         addNewswireDictEntry(baseDir, domain,
                             newswire, published,
                             postJsonObject['object']['summary'],
@@ -570,7 +700,8 @@ def addBlogsToNewswire(baseDir: str, domain: str, newswire: {},

 def getDictFromNewswire(session, baseDir: str, domain: str,
                         maxPostsPerSource: int, maxFeedSizeKb: int,
-                        maxTags: int, maxFeedItemSizeKb: int) -> {}:
+                        maxTags: int, maxFeedItemSizeKb: int,
+                        maxNewswirePosts: int) -> {}:
     """Gets rss feeds as a dictionary from newswire file
     """
     subscriptionsFilename = baseDir + '/accounts/newswire.txt'
@@ -621,4 +752,17 @@ def getDictFromNewswire(session, baseDir: str, domain: str,

     # sort into chronological order, latest first
     sortedResult = OrderedDict(sorted(result.items(), reverse=True))
+
+    # are there too many posts? If so then remove the oldest ones
+    noOfPosts = len(sortedResult.items())
+    if noOfPosts > maxNewswirePosts:
+        ctr = 0
+        removals = []
+        for dateStr, item in sortedResult.items():
+            ctr += 1
+            if ctr > maxNewswirePosts:
+                removals.append(dateStr)
+        for r in removals:
+            sortedResult.pop(r)
+
     return sortedResult
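
To make the new trimming step in getDictFromNewswire concrete, here is a small stand-alone sketch using made-up dictionary entries keyed by their published date:

from collections import OrderedDict

result = {'2020-11-20 09:00:00+00:00': ['oldest item'],
          '2020-11-22 18:51:33+00:00': ['newest item'],
          '2020-11-21 10:00:00+00:00': ['older item']}
maxNewswirePosts = 2

# newest first, then drop anything beyond maxNewswirePosts
sortedResult = OrderedDict(sorted(result.items(), reverse=True))
removals = [dateStr for ctr, dateStr in enumerate(sortedResult, 1)
            if ctr > maxNewswirePosts]
for r in removals:
    sortedResult.pop(r)

print(list(sortedResult.keys()))
# ['2020-11-22 18:51:33+00:00', '2020-11-21 10:00:00+00:00']
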
@@ -16,6 +16,7 @@ from session import postImage
 from utils import validNickname
 from utils import loadJson
 from utils import saveJson
+from utils import getImageExtensions
 from media import removeMetaData


@@ -54,7 +55,7 @@ def removeShare(baseDir: str, nickname: str, domain: str,
     # remove any image for the item
     itemIDfile = baseDir + '/sharefiles/' + nickname + '/' + itemID
     if sharesJson[itemID]['imageUrl']:
-        formats = ('png', 'jpg', 'gif', 'webp', 'avif')
+        formats = getImageExtensions()
         for ext in formats:
             if sharesJson[itemID]['imageUrl'].endswith('.' + ext):
                 if os.path.isfile(itemIDfile + '.' + ext):
@@ -108,7 +109,7 @@ def addShare(baseDir: str,
     if not imageFilename:
         sharesImageFilename = \
             baseDir + '/accounts/' + nickname + '@' + domain + '/upload'
-        formats = ('png', 'jpg', 'gif', 'webp', 'avif')
+        formats = getImageExtensions()
         for ext in formats:
             if os.path.isfile(sharesImageFilename + '.' + ext):
                 imageFilename = sharesImageFilename + '.' + ext
@@ -128,7 +129,7 @@ def addShare(baseDir: str,
         if not os.path.isdir(baseDir + '/sharefiles/' + nickname):
             os.mkdir(baseDir + '/sharefiles/' + nickname)
         itemIDfile = baseDir + '/sharefiles/' + nickname + '/' + itemID
-        formats = ('png', 'jpg', 'gif', 'webp', 'avif')
+        formats = getImageExtensions()
         for ext in formats:
             if imageFilename.endswith('.' + ext):
                 removeMetaData(imageFilename, itemIDfile + '.' + ext)
@@ -202,7 +203,7 @@ def expireSharesForAccount(baseDir: str, nickname: str, domain: str) -> None:
                 # remove any associated images
                 itemIDfile = \
                     baseDir + '/sharefiles/' + nickname + '/' + itemID
-                formats = ('png', 'jpg', 'gif', 'webp', 'avif')
+                formats = getImageExtensions()
                 for ext in formats:
                     if os.path.isfile(itemIDfile + '.' + ext):
                         os.remove(itemIDfile + '.' + ext)

tests.py (22 changes)
@@ -86,6 +86,7 @@ from jsonldsig import jsonldVerify
 from newsdaemon import hashtagRuleTree
 from newsdaemon import hashtagRuleResolve
 from newswire import getNewswireTags
+from newswire import parseFeedDate

 testServerAliceRunning = False
 testServerBobRunning = False
@@ -292,8 +293,9 @@ def createServerAlice(path: str, domain: str, port: int,
     onionDomain = None
     i2pDomain = None
     allowLocalNetworkAccess = True
+    maxNewswirePosts = 20
     print('Server running: Alice')
-    runDaemon(allowLocalNetworkAccess,
+    runDaemon(maxNewswirePosts, allowLocalNetworkAccess,
              2048, False, True, False, False, True, 10, False,
              0, 100, 1024, 5, False,
              0, False, 1, False, False, False,
@@ -359,8 +361,9 @@ def createServerBob(path: str, domain: str, port: int,
     onionDomain = None
     i2pDomain = None
     allowLocalNetworkAccess = True
+    maxNewswirePosts = 20
     print('Server running: Bob')
-    runDaemon(allowLocalNetworkAccess,
+    runDaemon(maxNewswirePosts, allowLocalNetworkAccess,
              2048, False, True, False, False, True, 10, False,
              0, 100, 1024, 5, False, 0,
              False, 1, False, False, False,
@@ -400,8 +403,9 @@ def createServerEve(path: str, domain: str, port: int, federationList: [],
     onionDomain = None
     i2pDomain = None
     allowLocalNetworkAccess = True
+    maxNewswirePosts = 20
     print('Server running: Eve')
-    runDaemon(allowLocalNetworkAccess,
+    runDaemon(maxNewswirePosts, allowLocalNetworkAccess,
              2048, False, True, False, False, True, 10, False,
              0, 100, 1024, 5, False, 0,
              False, 1, False, False, False,
@@ -2382,8 +2386,20 @@ def testFirstParagraphFromString():
     assert resultStr == testStr


+def testParseFeedDate():
+    print('testParseFeedDate')
+    pubDate = "2020-08-27T16:12:34+00:00"
+    publishedDate = parseFeedDate(pubDate)
+    assert publishedDate == "2020-08-27 16:12:34+00:00"
+
+    pubDate = "Sun, 22 Nov 2020 19:51:33 +0100"
+    publishedDate = parseFeedDate(pubDate)
+    assert publishedDate == "2020-11-22 18:51:33+00:00"
+
+
 def runAllTests():
     print('Running tests...')
+    testParseFeedDate()
     testFirstParagraphFromString()
     testGetNewswireTags()
     testHashtagRuleTree()

theme.py (3 changes)
@@ -9,6 +9,7 @@ __status__ = "Production"
 import os
 from utils import loadJson
 from utils import saveJson
+from utils import getImageExtensions
 from shutil import copyfile
 from content import dangerousCSS

@@ -473,7 +474,7 @@ def setThemeImages(baseDir: str, name: str) -> None:

     backgroundNames = ('login', 'shares', 'delete', 'follow',
                        'options', 'block', 'search', 'calendar')
-    extensions = ('webp', 'gif', 'jpg', 'png', 'avif')
+    extensions = getImageExtensions()

     for subdir, dirs, files in os.walk(baseDir + '/accounts'):
         for acct in dirs:

@@ -50,6 +50,7 @@
     "main-link-color": "#05b9ec",
     "main-link-color-hover": "#46eed5",
     "main-fg-color": "white",
+    "title-color": "white",
     "column-left-fg-color": "#05b9ec",
     "main-bg-color-dm": "#0b0a0a",
     "border-color": "#6800e7",

@@ -11,7 +11,7 @@
     "Delete this post": "احذف هذا المنشور",
     "Delete this event": "احذف هذا الحدث",
     "Reply to this post": "الرد على هذا المنصب",
-    "Write your post text below.": "اكتب نص المنشور أدناه.",
+    "Write your post text below.": "منشور جديد",
     "Write your reply to": "اكتب ردك على",
     "this post": "هذا المشنور",
     "Write your report below.": "اكتب تقريرك أدناه.",

@@ -11,7 +11,7 @@
     "Delete this post": "Suprimeix aquesta publicació",
     "Delete this event": "Suprimeix aquest esdeveniment",
     "Reply to this post": "Respon a aquesta publicació",
-    "Write your post text below.": "Escriviu el vostre text a continuació.",
+    "Write your post text below.": "Nova publicació",
     "Write your reply to": "Escriviu la vostra resposta a",
     "this post": "aquesta publicació",
     "Write your report below.": "Escriviu el vostre informe a continuació.",

@@ -11,7 +11,7 @@
     "Delete this post": "Dileu'r swydd hon",
     "Delete this event": "Dileu'r digwyddiad hwn",
     "Reply to this post": "Ymateb i'r swydd hon",
-    "Write your post text below.": "Ysgrifennwch destun eich post isod.",
+    "Write your post text below.": "Swydd newydd",
     "Write your reply to": "Ysgrifennwch eich ateb i",
     "this post": "y swydd hon",
     "Write your report below.": "Ysgrifennwch eich adroddiad isod.",

@@ -11,7 +11,7 @@
     "Delete this post": "Löschen",
     "Delete this event": "Löschen Sie dieses Ereignis",
     "Reply to this post": "Antworten",
-    "Write your post text below.": "Schreiben Sie unten Ihren Beitrag.",
+    "Write your post text below.": "Neuer Beitrag",
     "Write your reply to": "Schreiben Sie Ihre Antwort ",
     "this post": "auf diesen Beitrag",
     "Write your report below.": "Schreiben Sie Ihren Bericht unten.",

@@ -11,7 +11,7 @@
     "Delete this post": "Delete",
     "Delete this event": "Delete",
     "Reply to this post": "Reply",
-    "Write your post text below.": "Write your post text below.",
+    "Write your post text below.": "New post",
     "Write your reply to": "Write your reply to",
     "this post": "this post",
     "Write your report below.": "Write your report below.",

@@ -11,7 +11,7 @@
     "Delete this post": "Borra esta publicación",
     "Delete this event": "Eliminar este evento",
     "Reply to this post": "Responder a esta publicación",
-    "Write your post text below.": "Escribe el texto de tu publicación a continuación.",
+    "Write your post text below.": "Nueva publicación",
     "Write your reply to": "Escribe tu respuesta a",
     "this post": "esta publicación",
     "Write your report below.": "Escribe tu informe a continuación.",

@@ -11,7 +11,7 @@
     "Delete this post": "Supprimer ce post",
     "Delete this event": "Supprimer cet événement",
     "Reply to this post": "Répondre à ce post",
-    "Write your post text below.": "Entrez votre message ci-dessous.",
+    "Write your post text below.": "Nouveau poste",
     "Write your reply to": "Écrivez votre réponse à",
     "this post": "ce post",
     "Write your report below.": "Écrivez votre rapport ci-dessous.",

@@ -11,7 +11,7 @@
     "Delete this post": "Scrios an post seo",
     "Delete this event": "Scrios an imeacht seo",
     "Reply to this post": "Freagra ar an bpost seo",
-    "Write your post text below.": "Scríobh do théacs poist thíos.",
+    "Write your post text below.": "Post nua",
     "Write your reply to": "Scríobh do fhreagra",
     "this post": "an post seo",
     "Write your report below.": "Scríobh do thuarascáil thíos.",

@@ -11,7 +11,7 @@
     "Delete this post": "इस पोस्ट को मिटायें",
     "Delete this event": "इस ईवेंट को हटा दें",
     "Reply to this post": "इस पोस्ट का जवाब दें",
-    "Write your post text below.": "अपना पोस्ट टेक्स्ट नीचे लिखें।",
+    "Write your post text below.": "नई पोस्ट",
     "Write your reply to": "अपना उत्तर लिखें",
     "this post": "ये पद",
     "Write your report below.": "अपनी रिपोर्ट नीचे लिखें।",

@@ -11,7 +11,7 @@
     "Delete this post": "Elimina questo post",
     "Delete this event": "Elimina questo evento",
     "Reply to this post": "Rispondi a questo post",
-    "Write your post text below.": "Scrivi il testo del tuo post qui sotto.",
+    "Write your post text below.": "Nuovo post",
     "Write your reply to": "Scrivi la tua risposta a",
     "this post": "questo post",
     "Write your report below.": "Scrivi il tuo rapporto di seguito.",

@@ -11,7 +11,7 @@
     "Delete this post": "この投稿を削除",
     "Delete this event": "このイベントを削除",
     "Reply to this post": "この投稿への返信",
-    "Write your post text below.": "以下に投稿テキストを書いてください。",
+    "Write your post text below.": "新しい投稿",
     "Write your reply to": "への返信を書く",
     "this post": "この郵便受け",
     "Write your report below.": "以下にレポートを書いてください。",

@@ -159,7 +159,7 @@
     "Also see": "Vejatz tanben",
     "this post": "aquesta publicacion",
     "Write your reply to": "Escrivètz vòstra responsa a",
-    "Write your post text below.": "Escrivètz lo tèxte de la publicacion çai-jos.",
+    "Write your post text below.": "New post",
     "Reply to this post": "Respondre a aquesta publicacion",
     "Delete this post": "Suprimir aquesta publicacion",
     "Delete this event": "Suprimir aquesta publicacion",

@@ -11,7 +11,7 @@
     "Delete this post": "Excluir esta postagem",
     "Delete this event": "Excluir este evento",
     "Reply to this post": "Responder a este post",
-    "Write your post text below.": "Escreva o texto da sua postagem abaixo.",
+    "Write your post text below.": "Nova postagem",
     "Write your reply to": "Escreva sua resposta para",
     "this post": "esta postagem",
     "Write your report below.": "Escreva seu relatório abaixo.",

@@ -11,7 +11,7 @@
     "Delete this post": "Удалить этот пост",
     "Delete this event": "Удалить это событие",
     "Reply to this post": "Ответить на этот пост",
-    "Write your post text below.": "Напишите текст вашего поста ниже.",
+    "Write your post text below.": "Новый пост",
     "Write your reply to": "Напишите свой ответ",
     "this post": "эта почта",
     "Write your report below.": "Напишите свой отчет ниже.",

@@ -11,7 +11,7 @@
     "Delete this post": "删除此帖子",
     "Delete this event": "删除此活动",
     "Reply to this post": "回复此帖子",
-    "Write your post text below.": "在下面写您的帖子文字。",
+    "Write your post text below.": "最新帖子",
     "Write your reply to": "写您的回覆",
     "this post": "这个帖子",
     "Write your report below.": "在下面写下您的报告。",

utils.py (54 changes)
@@ -19,6 +19,58 @@ from calendar import monthrange
 from followingCalendar import addPersonToCalendar


+def getImageExtensions() -> []:
+    """Returns a list of the possible image file extensions
+    """
+    return ('png', 'jpg', 'jpeg', 'gif', 'webp', 'avif')
+
+
+def getVideoExtensions() -> []:
+    """Returns a list of the possible video file extensions
+    """
+    return ('mp4', 'webm', 'ogv')
+
+
+def getAudioExtensions() -> []:
+    """Returns a list of the possible audio file extensions
+    """
+    return ('mp3', 'ogg')
+
+
+def getMediaExtensions() -> []:
+    """Returns a list of the possible media file extensions
+    """
+    return getImageExtensions() + getVideoExtensions() + getAudioExtensions()
+
+
+def getImageFormats() -> str:
+    """Returns a string of permissable image formats
+    used when selecting an image for a new post
+    """
+    imageExt = getImageExtensions()
+
+    imageFormats = ''
+    for ext in imageExt:
+        if imageFormats:
+            imageFormats += ', '
+        imageFormats += '.' + ext
+    return imageFormats
+
+
+def getMediaFormats() -> str:
+    """Returns a string of permissable media formats
+    used when selecting an attachment for a new post
+    """
+    mediaExt = getMediaExtensions()
+
+    mediaFormats = ''
+    for ext in mediaExt:
+        if mediaFormats:
+            mediaFormats += ', '
+        mediaFormats += '.' + ext
+    return mediaFormats
+
+
 def removeHtml(content: str) -> str:
     """Removes html links from the given content.
     Used to ensure that profile descriptions don't contain dubious content
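
A quick sketch of what the new helpers return, and how the result plugs into the file-input accept attribute used elsewhere in this commit:

from utils import getImageFormats
from utils import getMediaFormats

print(getImageFormats())
# .png, .jpg, .jpeg, .gif, .webp, .avif
print(getMediaFormats())
# .png, .jpg, .jpeg, .gif, .webp, .avif, .mp4, .webm, .ogv, .mp3, .ogg

# e.g. in the new-post form:  ' accept="' + getMediaFormats() + '">'
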
@@ -193,7 +245,7 @@ def removeAvatarFromCache(baseDir: str, actorStr: str) -> None:
     """Removes any existing avatar entries from the cache
     This avoids duplicate entries with differing extensions
     """
-    avatarFilenameExtensions = ('png', 'jpg', 'gif', 'webp', 'avif')
+    avatarFilenameExtensions = getImageExtensions()
     for extension in avatarFilenameExtensions:
         avatarFilename = \
             baseDir + '/cache/avatars/' + actorStr + '.' + extension

@@ -287,7 +287,7 @@ def htmlEditLinks(cssCache: {}, translate: {}, baseDir: str, path: str,
     editLinksForm += \
         ' <div class="vertical-center">\n'
     editLinksForm += \
-        ' <p class="new-post-text">' + translate['Edit Links'] + '</p>'
+        ' <h1>' + translate['Edit Links'] + '</h1>'
     editLinksForm += \
         ' <div class="container">\n'
     editLinksForm += \

@@ -214,8 +214,12 @@ def htmlNewswire(baseDir: str, newswire: {}, nickname: str, moderator: bool,
                 item[0] = item[0].split('CDATA[')[1]
                 if ']' in item[0]:
                     item[0] = item[0].split(']')[0]
-        publishedDate = \
-            datetime.strptime(dateStr, "%Y-%m-%d %H:%M:%S%z")
+        try:
+            publishedDate = \
+                datetime.strptime(dateStr, "%Y-%m-%d %H:%M:%S%z")
+        except BaseException:
+            print('WARN: bad date format ' + dateStr)
+            continue
         dateShown = publishedDate.strftime("%Y-%m-%d %H:%M")

         dateStrLink = dateStr.replace('T', ' ')
@@ -495,7 +499,7 @@ def htmlEditNewswire(cssCache: {}, translate: {}, baseDir: str, path: str,
     editNewswireForm += \
         ' <div class="vertical-center">\n'
     editNewswireForm += \
-        ' <p class="new-post-text">' + translate['Edit newswire'] + '</p>'
+        ' <h1>' + translate['Edit newswire'] + '</h1>'
     editNewswireForm += \
         ' <div class="container">\n'
     # editNewswireForm += \
@@ -605,7 +609,7 @@ def htmlEditNewsPost(cssCache: {}, translate: {}, baseDir: str, path: str,
     editNewsPostForm += \
         ' <div class="vertical-center">\n'
     editNewsPostForm += \
-        ' <p class="new-post-text">' + translate['Edit News Post'] + '</p>'
+        ' <h1>' + translate['Edit News Post'] + '</h1>'
     editNewsPostForm += \
         ' <div class="container">\n'
     editNewsPostForm += \

@@ -10,6 +10,8 @@ import os
 from utils import isPublicPostFromUrl
 from utils import getNicknameFromActor
 from utils import getDomainFromActor
+from utils import getImageFormats
+from utils import getMediaFormats
 from webapp_utils import getIconsWebPath
 from webapp_utils import getBannerFile
 from webapp_utils import htmlHeaderWithExternalStyle
@@ -183,8 +185,8 @@ def htmlNewPost(cssCache: {}, mediaInstance: bool, translate: {},
     if not path.endswith('/newshare'):
         if not path.endswith('/newreport'):
             if not inReplyTo or path.endswith('/newreminder'):
-                newPostText = '<p class="new-post-text">' + \
-                    translate['Write your post text below.'] + '</p>\n'
+                newPostText = '<h1>' + \
+                    translate['Write your post text below.'] + '</h1>\n'
             else:
                 newPostText = \
                     '<p class="new-post-text">' + \
@@ -208,8 +210,8 @@ def htmlNewPost(cssCache: {}, mediaInstance: bool, translate: {},
                 showPublicOnDropdown = False
     else:
         newPostText = \
-            '<p class="new-post-text">' + \
-            translate['Write your report below.'] + '</p>\n'
+            '<h1>' + \
+            translate['Write your report below.'] + '</h1>\n'

         # custom report header with any additional instructions
         if os.path.isfile(baseDir + '/accounts/report.txt'):
@@ -233,20 +235,20 @@ def htmlNewPost(cssCache: {}, mediaInstance: bool, translate: {},
             translate['Terms of Service'] + '</a></p>\n'
     else:
         newPostText = \
-            '<p class="new-post-text">' + \
+            '<h1>' + \
             translate['Enter the details for your shared item below.'] + \
-            '</p>\n'
+            '</h1>\n'

     if path.endswith('/newquestion'):
         newPostText = \
-            '<p class="new-post-text">' + \
+            '<h1>' + \
             translate['Enter the choices for your question below.'] + \
-            '</p>\n'
+            '</h1>\n'

     if os.path.isfile(baseDir + '/accounts/newpost.txt'):
         with open(baseDir + '/accounts/newpost.txt', 'r') as file:
             newPostText = \
-                '<p class="new-post-text">' + file.read() + '</p>\n'
+                '<p>' + file.read() + '</p>\n'

     cssFilename = baseDir + '/epicyon-profile.css'
     if os.path.isfile(baseDir + '/epicyon.css'):
@@ -280,13 +282,12 @@ def htmlNewPost(cssCache: {}, mediaInstance: bool, translate: {},
         newPostImageSection += \
             ' <input type="file" id="attachpic" name="attachpic"'
         newPostImageSection += \
-            ' accept=".png, .jpg, .jpeg, .gif, .webp, .avif">\n'
+            ' accept="' + getImageFormats() + '">\n'
     else:
         newPostImageSection += \
             ' <input type="file" id="attachpic" name="attachpic"'
         newPostImageSection += \
-            ' accept=".png, .jpg, .jpeg, .gif, ' + \
-            '.webp, .avif, .mp4, .webm, .ogv, .mp3, .ogg">\n'
+            ' accept="' + getMediaFormats() + '">\n'
         newPostImageSection += ' </div>\n'

     scopeIcon = 'scope_public.png'

@@ -76,6 +76,8 @@ def htmlHashTagSwarm(baseDir: str, actor: str, translate: {}) -> str:
     currTime = datetime.utcnow()
     daysSinceEpoch = (currTime - datetime(1970, 1, 1)).days
     daysSinceEpochStr = str(daysSinceEpoch) + ' '
+    daysSinceEpochStr2 = str(daysSinceEpoch - 1) + ' '
+    recently = daysSinceEpoch - 1
     tagSwarm = []
     domainHistogram = {}

@@ -84,19 +86,26 @@ def htmlHashTagSwarm(baseDir: str, actor: str, translate: {}) -> str:
         tagsFilename = os.path.join(baseDir + '/tags', f)
         if not os.path.isfile(tagsFilename):
             continue

         # get last modified datetime
         modTimesinceEpoc = os.path.getmtime(tagsFilename)
         lastModifiedDate = datetime.fromtimestamp(modTimesinceEpoc)
         fileDaysSinceEpoch = (lastModifiedDate - datetime(1970, 1, 1)).days
-        # check if the file was last modified today
-        if fileDaysSinceEpoch != daysSinceEpoch:
+
+        # check if the file was last modified within the previous
+        # two days
+        if fileDaysSinceEpoch < recently:
             continue
+
         hashTagName = f.split('.')[0]
         if isBlockedHashtag(baseDir, hashTagName):
             continue
-        if daysSinceEpochStr not in open(tagsFilename).read():
-            continue
+        with open(tagsFilename, 'r') as fp:
+            # only read one line, which saves time and memory
+            lastTag = fp.readline()
+            if not lastTag.startswith(daysSinceEpochStr):
+                if not lastTag.startswith(daysSinceEpochStr2):
+                    continue
         with open(tagsFilename, 'r') as tagsFile:
             while True:
                 line = tagsFile.readline()
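
The first-line check above assumes each line of a tags file begins with the post's days-since-epoch value, with the newest entry first. A small stand-alone sketch of that check (the function name is illustrative, not part of the commit):

def tagUsedRecently(tagsFilename: str, daysSinceEpoch: int) -> bool:
    """Sketch: was this hashtag used today or yesterday?
    Assumes each line of a tags file starts with a days-since-epoch
    value and that the newest entry is the first line"""
    daysSinceEpochStr = str(daysSinceEpoch) + ' '
    daysSinceEpochStr2 = str(daysSinceEpoch - 1) + ' '
    with open(tagsFilename, 'r') as fp:
        lastTag = fp.readline()
    return lastTag.startswith(daysSinceEpochStr) or \
        lastTag.startswith(daysSinceEpochStr2)
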
@@ -111,7 +120,7 @@ def htmlHashTagSwarm(baseDir: str, actor: str, translate: {}) -> str:
                 if not postDaysSinceEpochStr.isdigit():
                     break
                 postDaysSinceEpoch = int(postDaysSinceEpochStr)
-                if postDaysSinceEpoch < daysSinceEpoch - 1:
+                if postDaysSinceEpoch < recently:
                     break
                 else:
                     postUrl = sections[2]

@@ -14,6 +14,7 @@ from utils import isSystemAccount
 from utils import removeHtml
 from utils import loadJson
 from utils import getConfigParam
+from utils import getImageFormats
 from skills import getSkills
 from theme import getThemesList
 from person import personBoxJson
@@ -851,7 +852,7 @@ def htmlEditProfile(cssCache: {}, translate: {}, baseDir: str, path: str,
                     defaultTimeline: str) -> str:
     """Shows the edit profile screen
     """
-    imageFormats = '.png, .jpg, .jpeg, .gif, .webp, .avif'
+    imageFormats = getImageFormats()
     path = path.replace('/inbox', '').replace('/outbox', '')
     path = path.replace('/shares', '')
     nickname = getNicknameFromActor(path)
@@ -1171,12 +1172,9 @@ def htmlEditProfile(cssCache: {}, translate: {}, baseDir: str, path: str,
         'accept-charset="UTF-8" action="' + path + '/profiledata">\n'
     editProfileForm += ' <div class="vertical-center">\n'
     editProfileForm += \
-        ' <p class="new-post-text">' + translate['Profile for'] + \
-        ' ' + nickname + '@' + domainFull + '</p>'
+        ' <h1>' + translate['Profile for'] + \
+        ' ' + nickname + '@' + domainFull + '</h1>'
     editProfileForm += ' <div class="container">\n'
     # editProfileForm += \
     #     ' <a href="' + pathOriginal + '"><button class="cancelbtn">' + \
     #     translate['Go Back'] + '</button></a>\n'
     editProfileForm += \
         ' <center>\n' + \
         ' <input type="submit" name="submitProfile" value="' + \

@@ -9,6 +9,7 @@ __status__ = "Production"
 import os
 from collections import OrderedDict
 from session import getJson
+from utils import getImageExtensions
 from utils import getProtocolPrefixes
 from utils import loadJson
 from utils import getCachedPostFilename
@@ -248,12 +249,6 @@ def updateAvatarImageCache(session, baseDir: str, httpPrefix: str,
     return avatarImageFilename.replace(baseDir + '/cache', '')


-def getImageExtensions() -> []:
-    """Returns a list of the possible image file extensions
-    """
-    return ('png', 'jpg', 'jpeg', 'gif', 'webp', 'avif')
-
-
 def getPersonAvatarUrl(baseDir: str, personUrl: str, personCache: {},
                        allowDownloads: bool) -> str:
     """Returns the avatar url for the person