epicyon/newswire.py

__filename__ = "newswire.py"
__author__ = "Bob Mottram"
__license__ = "AGPL3+"
__version__ = "1.1.0"
__maintainer__ = "Bob Mottram"
__email__ = "bob@freedombone.net"
__status__ = "Production"
import os
import requests
from socket import error as SocketError
import errno
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from collections import OrderedDict
from utils import setHashtagCategory
from utils import firstParagraphFromString
from utils import isPublicPost
from utils import locatePost
from utils import loadJson
from utils import saveJson
from utils import isSuspended
from utils import containsInvalidChars
from utils import removeHtml
from blocking import isBlockedDomain
from blocking import isBlockedHashtag
from filters import isFiltered
def removeCDATA(text: str) -> str:
"""Removes any CDATA from the given text
"""
if 'CDATA[' in text:
text = text.split('CDATA[')[1]
if ']' in text:
text = text.split(']')[0]
return text
def rss2Header(httpPrefix: str,
nickname: str, domainFull: str,
title: str, translate: {}) -> str:
"""Header for an RSS 2.0 feed
"""
rssStr = "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>"
rssStr += "<rss version=\"2.0\">"
rssStr += '<channel>'
if title.startswith('News'):
rssStr += ' <title>Newswire</title>'
rssStr += ' <link>' + httpPrefix + '://' + domainFull + \
'/newswire.xml' + '</link>'
elif title.startswith('Site'):
rssStr += ' <title>' + domainFull + '</title>'
rssStr += ' <link>' + httpPrefix + '://' + domainFull + \
'/blog/rss.xml' + '</link>'
else:
rssStr += ' <title>' + translate[title] + '</title>'
rssStr += ' <link>' + httpPrefix + '://' + domainFull + \
'/users/' + nickname + '/rss.xml' + '</link>'
return rssStr
def rss2Footer() -> str:
"""Footer for an RSS 2.0 feed
"""
rssStr = '</channel>'
rssStr += '</rss>'
return rssStr
def getNewswireTags(text: str, maxTags: int) -> []:
"""Returns a list of hashtags found in the given text
"""
if '#' not in text:
return []
if ' ' not in text:
return []
textSimplified = \
text.replace(',', ' ').replace(';', ' ').replace('- ', ' ')
textSimplified = textSimplified.replace('. ', ' ').strip()
if textSimplified.endswith('.'):
textSimplified = textSimplified[:len(textSimplified)-1]
words = textSimplified.split(' ')
tags = []
for wrd in words:
if wrd.startswith('#'):
if len(wrd) > 1:
if wrd not in tags:
tags.append(wrd)
if len(tags) >= maxTags:
break
return tags
def addNewswireDictEntry(baseDir: str, domain: str,
newswire: {}, dateStr: str,
title: str, link: str,
votesStatus: str, postFilename: str,
description: str, moderated: bool,
mirrored: bool,
tags=[], maxTags=32) -> None:
"""Update the newswire dictionary
"""
allText = removeHtml(title + ' ' + description)
# check that none of the text is filtered against
if isFiltered(baseDir, 'news', domain, allText):
return
if tags is None:
tags = []
# extract hashtags from the text of the feed post
postTags = getNewswireTags(allText, maxTags)
# combine the tags into a single list
for tag in tags:
if tag not in postTags:
if len(postTags) < maxTags:
postTags.append(tag)
# check that no tags are blocked
for tag in postTags:
if isBlockedHashtag(baseDir, tag):
return
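    # each entry is indexed by its publication date and stored as a list:
    # [title, link, votesStatus, postFilename, description, moderated,
    #  tags, mirrored]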
newswire[dateStr] = [
title,
link,
votesStatus,
postFilename,
description,
moderated,
postTags,
mirrored
]
def parseFeedDate(pubDate: str) -> str:
"""Returns a UTC date string based on the given date string
2020-11-22 18:14:40 +00:00
This tries a number of formats to see which work
"""
formats = ("%a, %d %b %Y %H:%M:%S %z",
"%a, %d %b %Y %H:%M:%S EST",
"%a, %d %b %Y %H:%M:%S UT",
"%Y-%m-%dT%H:%M:%SZ",
"%Y-%m-%dT%H:%M:%S%z")
publishedDate = None
for dateFormat in formats:
if ',' in pubDate and ',' not in dateFormat:
continue
if ',' not in pubDate and ',' in dateFormat:
continue
if '-' in pubDate and '-' not in dateFormat:
continue
if '-' not in pubDate and '-' in dateFormat:
continue
if 'T' in pubDate and 'T' not in dateFormat:
continue
if 'T' not in pubDate and 'T' in dateFormat:
continue
if 'Z' in pubDate and 'Z' not in dateFormat:
continue
if 'Z' not in pubDate and 'Z' in dateFormat:
continue
if 'EST' not in pubDate and 'EST' in dateFormat:
continue
if 'EST' in pubDate and 'EST' not in dateFormat:
continue
if 'UT' not in pubDate and 'UT' in dateFormat:
continue
if 'UT' in pubDate and 'UT' not in dateFormat:
continue
try:
publishedDate = \
datetime.strptime(pubDate, dateFormat)
except BaseException:
print('WARN: unrecognized date format: ' +
pubDate + ' ' + dateFormat)
continue
if publishedDate:
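            # strptime matched 'EST' as a literal, so the parsed time is
            # still Eastern Standard Time (UTC-5); add five hours for UTC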
if pubDate.endswith(' EST'):
hoursAdded = timedelta(hours=5)
publishedDate = publishedDate + hoursAdded
break
pubDateStr = None
if publishedDate:
offset = publishedDate.utcoffset()
if offset:
publishedDate = publishedDate - offset
# convert local date to UTC
publishedDate = publishedDate.replace(tzinfo=timezone.utc)
pubDateStr = str(publishedDate)
if not pubDateStr.endswith('+00:00'):
pubDateStr += '+00:00'
return pubDateStr
def loadHashtagCategories(baseDir: str, language: str) -> None:
"""Loads an rss file containing hashtag categories
"""
hashtagCategoriesFilename = baseDir + '/categories.xml'
if not os.path.isfile(hashtagCategoriesFilename):
hashtagCategoriesFilename = \
baseDir + '/defaultcategories/' + language + '.xml'
if not os.path.isfile(hashtagCategoriesFilename):
return
with open(hashtagCategoriesFilename, 'r') as fp:
xmlStr = fp.read()
xml2StrToHashtagCategories(baseDir, xmlStr, 1024, True)
def xml2StrToHashtagCategories(baseDir: str, xmlStr: str,
maxCategoriesFeedItemSizeKb: int,
force=False) -> None:
"""Updates hashtag categories based upon an rss feed
"""
rssItems = xmlStr.split('<item>')
maxBytes = maxCategoriesFeedItemSizeKb * 1024
for rssItem in rssItems:
if not rssItem:
continue
if len(rssItem) > maxBytes:
print('WARN: rss categories feed item is too big')
continue
if '<title>' not in rssItem:
continue
if '</title>' not in rssItem:
continue
if '<description>' not in rssItem:
continue
if '</description>' not in rssItem:
continue
categoryStr = rssItem.split('<title>')[1]
categoryStr = categoryStr.split('</title>')[0].strip()
if not categoryStr:
continue
if 'CDATA' in categoryStr:
continue
hashtagListStr = rssItem.split('<description>')[1]
hashtagListStr = hashtagListStr.split('</description>')[0].strip()
if not hashtagListStr:
continue
if 'CDATA' in hashtagListStr:
continue
hashtagList = hashtagListStr.split(' ')
if not isBlockedHashtag(baseDir, categoryStr):
for hashtag in hashtagList:
setHashtagCategory(baseDir, hashtag, categoryStr, force)
def xml2StrToDict(baseDir: str, domain: str, xmlStr: str,
moderated: bool, mirrored: bool,
maxPostsPerSource: int,
maxFeedItemSizeKb: int,
maxCategoriesFeedItemSizeKb: int) -> {}:
"""Converts an xml 2.0 string to a dictionary
"""
if '<item>' not in xmlStr:
return {}
result = {}
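    # a feed titled '#categories' defines hashtag categories
    # rather than ordinary news items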
if '<title>#categories</title>' in xmlStr:
xml2StrToHashtagCategories(baseDir, xmlStr,
maxCategoriesFeedItemSizeKb)
return {}
rssItems = xmlStr.split('<item>')
postCtr = 0
maxBytes = maxFeedItemSizeKb * 1024
for rssItem in rssItems:
if not rssItem:
continue
if len(rssItem) > maxBytes:
print('WARN: rss feed item is too big')
continue
if '<title>' not in rssItem:
continue
if '</title>' not in rssItem:
continue
if '<link>' not in rssItem:
continue
if '</link>' not in rssItem:
continue
if '<pubDate>' not in rssItem:
continue
if '</pubDate>' not in rssItem:
continue
title = rssItem.split('<title>')[1]
title = removeCDATA(title.split('</title>')[0])
description = ''
if '<description>' in rssItem and '</description>' in rssItem:
description = rssItem.split('<description>')[1]
description = removeCDATA(description.split('</description>')[0])
else:
if '<media:description>' in rssItem and \
'</media:description>' in rssItem:
description = rssItem.split('<media:description>')[1]
description = description.split('</media:description>')[0]
description = removeCDATA(description)
link = rssItem.split('<link>')[1]
link = link.split('</link>')[0]
if '://' not in link:
continue
itemDomain = link.split('://')[1]
if '/' in itemDomain:
itemDomain = itemDomain.split('/')[0]
if isBlockedDomain(baseDir, itemDomain):
continue
pubDate = rssItem.split('<pubDate>')[1]
pubDate = pubDate.split('</pubDate>')[0]
pubDateStr = parseFeedDate(pubDate)
if pubDateStr:
postFilename = ''
votesStatus = []
addNewswireDictEntry(baseDir, domain,
result, pubDateStr,
title, link,
votesStatus, postFilename,
description, moderated, mirrored)
postCtr += 1
if postCtr >= maxPostsPerSource:
break
if postCtr > 0:
print('Added ' + str(postCtr) + ' rss feed items to newswire')
return result
def atomFeedToDict(baseDir: str, domain: str, xmlStr: str,
moderated: bool, mirrored: bool,
maxPostsPerSource: int,
maxFeedItemSizeKb: int) -> {}:
"""Converts an atom feed string to a dictionary
"""
if '<entry>' not in xmlStr:
return {}
result = {}
atomItems = xmlStr.split('<entry>')
postCtr = 0
maxBytes = maxFeedItemSizeKb * 1024
for atomItem in atomItems:
if not atomItem:
continue
if len(atomItem) > maxBytes:
print('WARN: atom feed item is too big')
continue
if '<title>' not in atomItem:
continue
if '</title>' not in atomItem:
continue
if '<link>' not in atomItem:
continue
if '</link>' not in atomItem:
continue
if '<updated>' not in atomItem:
continue
if '</updated>' not in atomItem:
continue
title = atomItem.split('<title>')[1]
title = removeCDATA(title.split('</title>')[0])
description = ''
if '<summary>' in atomItem and '</summary>' in atomItem:
description = atomItem.split('<summary>')[1]
description = removeCDATA(description.split('</summary>')[0])
else:
if '<media:description>' in atomItem and \
'</media:description>' in atomItem:
description = atomItem.split('<media:description>')[1]
description = description.split('</media:description>')[0]
description = removeCDATA(description)
link = atomItem.split('<link>')[1]
link = link.split('</link>')[0]
if '://' not in link:
continue
itemDomain = link.split('://')[1]
if '/' in itemDomain:
itemDomain = itemDomain.split('/')[0]
if isBlockedDomain(baseDir, itemDomain):
continue
pubDate = atomItem.split('<updated>')[1]
pubDate = pubDate.split('</updated>')[0]
pubDateStr = parseFeedDate(pubDate)
if pubDateStr:
postFilename = ''
votesStatus = []
addNewswireDictEntry(baseDir, domain,
result, pubDateStr,
title, link,
votesStatus, postFilename,
description, moderated, mirrored)
postCtr += 1
if postCtr >= maxPostsPerSource:
break
if postCtr > 0:
print('Added ' + str(postCtr) + ' atom feed items to newswire')
return result
def atomFeedYTToDict(baseDir: str, domain: str, xmlStr: str,
moderated: bool, mirrored: bool,
maxPostsPerSource: int,
maxFeedItemSizeKb: int) -> {}:
"""Converts an atom-style YouTube feed string to a dictionary
"""
if '<entry>' not in xmlStr:
return {}
if isBlockedDomain(baseDir, 'www.youtube.com'):
return {}
result = {}
atomItems = xmlStr.split('<entry>')
postCtr = 0
maxBytes = maxFeedItemSizeKb * 1024
for atomItem in atomItems:
if not atomItem:
continue
if not atomItem.strip():
continue
if len(atomItem) > maxBytes:
print('WARN: atom feed item is too big')
continue
if '<title>' not in atomItem:
continue
if '</title>' not in atomItem:
continue
if '<published>' not in atomItem:
continue
if '</published>' not in atomItem:
continue
if '<yt:videoId>' not in atomItem:
continue
if '</yt:videoId>' not in atomItem:
continue
title = atomItem.split('<title>')[1]
title = removeCDATA(title.split('</title>')[0])
description = ''
if '<media:description>' in atomItem and \
'</media:description>' in atomItem:
description = atomItem.split('<media:description>')[1]
description = description.split('</media:description>')[0]
description = removeCDATA(description)
elif '<summary>' in atomItem and '</summary>' in atomItem:
description = atomItem.split('<summary>')[1]
description = description.split('</summary>')[0]
description = removeCDATA(description)
link = atomItem.split('<yt:videoId>')[1]
link = link.split('</yt:videoId>')[0]
link = 'https://www.youtube.com/watch?v=' + link.strip()
pubDate = atomItem.split('<published>')[1]
pubDate = pubDate.split('</published>')[0]
pubDateStr = parseFeedDate(pubDate)
if pubDateStr:
postFilename = ''
votesStatus = []
addNewswireDictEntry(baseDir, domain,
result, pubDateStr,
title, link,
votesStatus, postFilename,
description, moderated, mirrored)
postCtr += 1
if postCtr >= maxPostsPerSource:
break
if postCtr > 0:
print('Added ' + str(postCtr) + ' YouTube feed items to newswire')
return result
def xmlStrToDict(baseDir: str, domain: str, xmlStr: str,
moderated: bool, mirrored: bool,
maxPostsPerSource: int,
maxFeedItemSizeKb: int,
maxCategoriesFeedItemSizeKb: int) -> {}:
"""Converts an xml string to a dictionary
"""
if '<yt:videoId>' in xmlStr and '<yt:channelId>' in xmlStr:
print('YouTube feed: reading')
return atomFeedYTToDict(baseDir, domain,
xmlStr, moderated, mirrored,
maxPostsPerSource, maxFeedItemSizeKb)
elif 'rss version="2.0"' in xmlStr:
return xml2StrToDict(baseDir, domain,
xmlStr, moderated, mirrored,
maxPostsPerSource, maxFeedItemSizeKb,
maxCategoriesFeedItemSizeKb)
elif 'xmlns="http://www.w3.org/2005/Atom"' in xmlStr:
return atomFeedToDict(baseDir, domain,
xmlStr, moderated, mirrored,
maxPostsPerSource, maxFeedItemSizeKb)
return {}
def YTchannelToAtomFeed(url: str) -> str:
"""Converts a YouTube channel url into an atom feed url
"""
if 'youtube.com/channel/' not in url:
return url
channelId = url.split('youtube.com/channel/')[1].strip()
channelUrl = \
'https://www.youtube.com/feeds/videos.xml?channel_id=' + channelId
print('YouTube feed: ' + channelUrl)
return channelUrl
def getRSS(baseDir: str, domain: str, session, url: str,
moderated: bool, mirrored: bool,
maxPostsPerSource: int, maxFeedSizeKb: int,
maxFeedItemSizeKb: int,
maxCategoriesFeedItemSizeKb: int) -> {}:
"""Returns an RSS url as a dict
"""
if not isinstance(url, str):
print('url: ' + str(url))
print('ERROR: getRSS url should be a string')
return None
headers = {
'Accept': 'text/xml; charset=UTF-8'
}
params = None
sessionParams = {}
sessionHeaders = {}
if headers:
sessionHeaders = headers
if params:
sessionParams = params
sessionHeaders['User-Agent'] = \
'Mozilla/5.0 (X11; Linux x86_64; rv:81.0) Gecko/20100101 Firefox/81.0'
if not session:
print('WARN: no session specified for getRSS')
url = YTchannelToAtomFeed(url)
try:
result = session.get(url, headers=sessionHeaders, params=sessionParams)
if result:
if int(len(result.text) / 1024) < maxFeedSizeKb and \
not containsInvalidChars(result.text):
return xmlStrToDict(baseDir, domain, result.text,
moderated, mirrored,
maxPostsPerSource,
maxFeedItemSizeKb,
maxCategoriesFeedItemSizeKb)
else:
print('WARN: feed is too large, ' +
'or contains invalid characters: ' + url)
else:
print('WARN: no result returned for feed ' + url)
except requests.exceptions.RequestException as e:
print('ERROR: getRSS failed\nurl: ' + str(url) + '\n' +
'headers: ' + str(sessionHeaders) + '\n' +
'params: ' + str(sessionParams) + '\n')
print(e)
except ValueError as e:
print('ERROR: getRSS failed\nurl: ' + str(url) + '\n' +
'headers: ' + str(sessionHeaders) + '\n' +
'params: ' + str(sessionParams) + '\n')
print(e)
except SocketError as e:
if e.errno == errno.ECONNRESET:
print('WARN: connection was reset during getRSS')
print(e)
return None
def getRSSfromDict(baseDir: str, newswire: {},
httpPrefix: str, domainFull: str,
title: str, translate: {}) -> str:
"""Returns an rss feed from the current newswire dict.
This allows other instances to subscribe to the same newswire
"""
rssStr = rss2Header(httpPrefix,
None, domainFull,
'Newswire', translate)
if not newswire:
return ''
for published, fields in newswire.items():
if '+00:00' in published:
published = published.replace('+00:00', 'Z').strip()
published = published.replace(' ', 'T')
else:
publishedWithOffset = \
datetime.strptime(published, "%Y-%m-%d %H:%M:%S%z")
published = publishedWithOffset.strftime("%Y-%m-%dT%H:%M:%SZ")
try:
pubDate = datetime.strptime(published, "%Y-%m-%dT%H:%M:%SZ")
except Exception as e:
print('WARN: Unable to convert date ' + published + ' ' + str(e))
continue
rssStr += '<item>\n'
rssStr += ' <title>' + fields[0] + '</title>\n'
description = removeCDATA(firstParagraphFromString(fields[4]))
rssStr += ' <description>' + description + '</description>\n'
url = fields[1]
if '://' not in url:
if domainFull not in url:
url = httpPrefix + '://' + domainFull + url
rssStr += ' <link>' + url + '</link>\n'
rssDateStr = pubDate.strftime("%a, %d %b %Y %H:%M:%S UT")
rssStr += ' <pubDate>' + rssDateStr + '</pubDate>\n'
rssStr += '</item>\n'
rssStr += rss2Footer()
return rssStr
def isNewswireBlogPost(postJsonObject: {}) -> bool:
"""Is the given object a blog post?
2020-10-25 10:47:39 +00:00
There isn't any difference between a blog post and a newswire blog post
but we may here need to check for different properties than
isBlogPost does
"""
if not postJsonObject:
return False
if not postJsonObject.get('object'):
return False
if not isinstance(postJsonObject['object'], dict):
return False
if postJsonObject['object'].get('summary') and \
postJsonObject['object'].get('url') and \
postJsonObject['object'].get('content') and \
postJsonObject['object'].get('published'):
return isPublicPost(postJsonObject)
return False
def getHashtagsFromPost(postJsonObject: {}) -> []:
"""Returns a list of any hashtags within a post
"""
if not postJsonObject.get('object'):
return []
if not isinstance(postJsonObject['object'], dict):
return []
if not postJsonObject['object'].get('tag'):
return []
if not isinstance(postJsonObject['object']['tag'], list):
return []
tags = []
for tg in postJsonObject['object']['tag']:
if not isinstance(tg, dict):
continue
if not tg.get('name'):
continue
if not tg.get('type'):
continue
if tg['type'] != 'Hashtag':
continue
if tg['name'] not in tags:
tags.append(tg['name'])
return tags
def addAccountBlogsToNewswire(baseDir: str, nickname: str, domain: str,
newswire: {},
maxBlogsPerAccount: int,
indexFilename: str,
maxTags: int) -> None:
"""Adds blogs for the given account to the newswire
"""
if not os.path.isfile(indexFilename):
return
# local blog entries are unmoderated by default
moderated = False
# local blogs can potentially be moderated
moderatedFilename = \
baseDir + '/accounts/' + nickname + '@' + domain + \
'/.newswiremoderated'
if os.path.isfile(moderatedFilename):
moderated = True
with open(indexFilename, 'r') as indexFile:
postFilename = 'start'
ctr = 0
while postFilename:
postFilename = indexFile.readline()
if postFilename:
# if this is a full path then remove the directories
if '/' in postFilename:
postFilename = postFilename.split('/')[-1]
# filename of the post without any extension or path
# This should also correspond to any index entry in
# the posts cache
postUrl = \
postFilename.replace('\n', '').replace('\r', '')
postUrl = postUrl.replace('.json', '').strip()
# read the post from file
fullPostFilename = \
locatePost(baseDir, nickname,
domain, postUrl, False)
if not fullPostFilename:
print('Unable to locate post ' + postUrl)
ctr += 1
if ctr >= maxBlogsPerAccount:
break
continue
postJsonObject = None
if fullPostFilename:
postJsonObject = loadJson(fullPostFilename)
if isNewswireBlogPost(postJsonObject):
published = postJsonObject['object']['published']
published = published.replace('T', ' ')
published = published.replace('Z', '+00:00')
votes = []
if os.path.isfile(fullPostFilename + '.votes'):
votes = loadJson(fullPostFilename + '.votes')
content = postJsonObject['object']['content']
description = firstParagraphFromString(content)
description = removeCDATA(description)
addNewswireDictEntry(baseDir, domain,
newswire, published,
postJsonObject['object']['summary'],
postJsonObject['object']['url'],
votes, fullPostFilename,
description, moderated, False,
getHashtagsFromPost(postJsonObject),
maxTags)
ctr += 1
if ctr >= maxBlogsPerAccount:
break
def addBlogsToNewswire(baseDir: str, domain: str, newswire: {},
maxBlogsPerAccount: int,
maxTags: int) -> None:
"""Adds blogs from each user account into the newswire
"""
moderationDict = {}
# go through each account
for subdir, dirs, files in os.walk(baseDir + '/accounts'):
for handle in dirs:
if '@' not in handle:
continue
if 'inbox@' in handle:
continue
nickname = handle.split('@')[0]
# has this account been suspended?
if isSuspended(baseDir, nickname):
continue
if os.path.isfile(baseDir + '/accounts/' + handle +
'/.nonewswire'):
continue
# is there a blogs timeline for this account?
accountDir = os.path.join(baseDir + '/accounts', handle)
blogsIndex = accountDir + '/tlblogs.index'
if os.path.isfile(blogsIndex):
domain = handle.split('@')[1]
addAccountBlogsToNewswire(baseDir, nickname, domain,
newswire, maxBlogsPerAccount,
blogsIndex, maxTags)
# sort the moderation dict into chronological order, latest first
sortedModerationDict = \
OrderedDict(sorted(moderationDict.items(), reverse=True))
# save the moderation queue details for later display
newswireModerationFilename = baseDir + '/accounts/newswiremoderation.txt'
if sortedModerationDict:
saveJson(sortedModerationDict, newswireModerationFilename)
else:
# remove the file if there is nothing to moderate
if os.path.isfile(newswireModerationFilename):
os.remove(newswireModerationFilename)
def getDictFromNewswire(session, baseDir: str, domain: str,
maxPostsPerSource: int, maxFeedSizeKb: int,
maxTags: int, maxFeedItemSizeKb: int,
maxNewswirePosts: int,
maxCategoriesFeedItemSizeKb: int) -> {}:
"""Gets rss feeds as a dictionary from newswire file
2020-10-04 09:51:12 +00:00
"""
subscriptionsFilename = baseDir + '/accounts/newswire.txt'
if not os.path.isfile(subscriptionsFilename):
return {}
maxPostsPerSource = 5
# add rss feeds
rssFeed = []
with open(subscriptionsFilename, 'r') as fp:
rssFeed = fp.readlines()
result = {}
for url in rssFeed:
url = url.strip()
# Does this contain a url?
if '://' not in url:
continue
# is this a comment?
if url.startswith('#'):
continue
# should this feed be moderated?
moderated = False
if '*' in url:
moderated = True
url = url.replace('*', '').strip()
# should this feed content be mirrored?
mirrored = False
if '!' in url:
mirrored = True
url = url.replace('!', '').strip()
itemsList = getRSS(baseDir, domain, session, url,
moderated, mirrored,
maxPostsPerSource, maxFeedSizeKb,
maxFeedItemSizeKb,
maxCategoriesFeedItemSizeKb)
if itemsList:
for dateStr, item in itemsList.items():
result[dateStr] = item
# add blogs from each user account
addBlogsToNewswire(baseDir, domain, result,
maxPostsPerSource, maxTags)
# sort into chronological order, latest first
sortedResult = OrderedDict(sorted(result.items(), reverse=True))
# are there too many posts? If so then remove the oldest ones
noOfPosts = len(sortedResult.items())
if noOfPosts > maxNewswirePosts:
ctr = 0
removals = []
for dateStr, item in sortedResult.items():
ctr += 1
if ctr > maxNewswirePosts:
removals.append(dateStr)
for r in removals:
sortedResult.pop(r)
return sortedResult