Merge branch 'bugfix-dont-call-fetch-page-in-constructor' of https://github.com/coffeemakr/NewPipeExtractor into f

This commit is contained in:
Christian Schabesberger 2017-11-30 15:39:27 +01:00
commit 18e486da30
30 changed files with 104 additions and 62 deletions

View File

@ -24,4 +24,12 @@ task sourcesJar(type: Jar, dependsOn: classes) {
artifacts {
archives sourcesJar
}
tasks.withType(Test) {
testLogging {
events "skipped", "failed"
showStandardStreams = true
exceptionFormat = 'full'
}
}

View File

@ -33,12 +33,16 @@ public abstract class Extractor {
*/
@Nullable
private String cleanUrl;
private boolean pageFetched = false;
private final Downloader downloader;
public Extractor(StreamingService service, String url) throws ExtractionException {
if(service == null) throw new NullPointerException("service is null");
if(url == null) throw new NullPointerException("url is null");
this.service = service;
this.originalUrl = url;
this.downloader = NewPipe.getDownloader();
if(downloader == null) throw new NullPointerException("downloader is null");
}
/**
@ -49,8 +53,26 @@ public abstract class Extractor {
/**
* Fetch the current page.
* @throws IOException if the page can not be loaded
 * @throws ExtractionException if the page's content is not understood
*/
public abstract void fetchPage() throws IOException, ExtractionException;
public void fetchPage() throws IOException, ExtractionException {
if(pageFetched) return;
onFetchPage(downloader);
pageFetched = true;
}
protected void assertPageFetched() {
if(!pageFetched) throw new IllegalStateException("Page is not fetched. Make sure you call fetchPage()");
}
/**
* Fetch the current page.
 * @param downloader the downloader to use
 * @throws IOException if the page can not be loaded
 * @throws ExtractionException if the page's content is not understood
*/
public abstract void onFetchPage(@Nonnull Downloader downloader) throws IOException, ExtractionException;
@Nonnull
public abstract String getId() throws ParsingException;

View File

@ -16,7 +16,7 @@ public abstract class Info implements Serializable {
public final String url;
public final String name;
public List<Throwable> errors = new ArrayList<>();
public final List<Throwable> errors = new ArrayList<>();
public void addError(Throwable throwable) {
this.errors.add(throwable);

View File

@ -3,9 +3,10 @@ package org.schabi.newpipe.extractor;
import org.schabi.newpipe.extractor.stream.SubtitlesFormat;
public class Subtitles {
private SubtitlesFormat format;
private String languageCode, URL;
private boolean autoGenerated;
private final SubtitlesFormat format;
private final String languageCode;
private final String URL;
private final boolean autoGenerated;
public Subtitles(SubtitlesFormat format, String languageCode, String URL, boolean autoGenerated) {
this.format = format;

View File

@ -27,7 +27,7 @@ import java.util.List;
public abstract class SuggestionExtractor {
private int serviceId;
private final int serviceId;
public SuggestionExtractor(int serviceId) {
this.serviceId = serviceId;

View File

@ -30,7 +30,7 @@ import java.io.IOException;
public abstract class KioskExtractor extends ListExtractor {
private String contentCountry = null;
private String id = null;
private final String id;
public KioskExtractor(StreamingService streamingService,
String url,

View File

@ -19,8 +19,8 @@ public class KioskList {
throws ExtractionException, IOException;
}
private int service_id;
private HashMap<String, KioskEntry> kioskList = new HashMap<>();
private final int service_id;
private final HashMap<String, KioskEntry> kioskList = new HashMap<>();
private String defaultKiosk = null;
private class KioskEntry {
@ -28,8 +28,8 @@ public class KioskList {
extractorFactory = ef;
handler = h;
}
KioskExtractorFactory extractorFactory;
UrlIdHandler handler;
final KioskExtractorFactory extractorFactory;
final UrlIdHandler handler;
}
public KioskList(int service_id) {

View File

@ -35,7 +35,7 @@ public abstract class SearchEngine {
}
}
private InfoItemSearchCollector collector;
private final InfoItemSearchCollector collector;
public SearchEngine(int serviceId) {
collector = new InfoItemSearchCollector(serviceId);

View File

@ -24,14 +24,13 @@ public class SoundcloudChannelExtractor extends ChannelExtractor {
}
@Override
public void fetchPage() throws IOException, ExtractionException {
Downloader dl = NewPipe.getDownloader();
public void onFetchPage(@Nonnull Downloader downloader) throws IOException, ExtractionException {
userId = getUrlIdHandler().getId(getOriginalUrl());
String apiUrl = "https://api.soundcloud.com/users/" + userId +
"?client_id=" + SoundcloudParsingHelper.clientId();
String response = dl.download(apiUrl);
String response = downloader.download(apiUrl);
try {
user = JsonParser.object().from(response);
} catch (JsonParserException e) {

View File

@ -4,7 +4,7 @@ import com.grack.nanojson.JsonObject;
import org.schabi.newpipe.extractor.channel.ChannelInfoItemExtractor;
public class SoundcloudChannelInfoItemExtractor implements ChannelInfoItemExtractor {
private JsonObject searchResult;
private final JsonObject searchResult;
public SoundcloudChannelInfoItemExtractor(JsonObject searchResult) {
this.searchResult = searchResult;

View File

@ -4,6 +4,7 @@ import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.schabi.newpipe.extractor.Downloader;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.UrlIdHandler;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
@ -23,7 +24,7 @@ public class SoundcloudChartsExtractor extends KioskExtractor {
}
@Override
public void fetchPage() {
public void onFetchPage(@Nonnull Downloader downloader) {
}
@Nonnull

View File

@ -67,13 +67,13 @@ public class SoundcloudParsingHelper {
*
* See https://developers.soundcloud.com/docs/api/reference#resolve
*/
public static JsonObject resolveFor(String url) throws IOException, ReCaptchaException, ParsingException {
public static JsonObject resolveFor(Downloader downloader, String url) throws IOException, ReCaptchaException, ParsingException {
String apiUrl = "https://api.soundcloud.com/resolve"
+ "?url=" + URLEncoder.encode(url, "UTF-8")
+ "&client_id=" + clientId();
try {
return JsonParser.object().from(NewPipe.getDownloader().download(apiUrl));
return JsonParser.object().from(downloader.download(apiUrl));
} catch (JsonParserException e) {
throw new ParsingException("Could not parse json response", e);
}

View File

@ -24,15 +24,14 @@ public class SoundcloudPlaylistExtractor extends PlaylistExtractor {
}
@Override
public void fetchPage() throws IOException, ExtractionException {
Downloader dl = NewPipe.getDownloader();
public void onFetchPage(@Nonnull Downloader downloader) throws IOException, ExtractionException {
playlistId = getUrlIdHandler().getId(getOriginalUrl());
String apiUrl = "https://api.soundcloud.com/playlists/" + playlistId +
"?client_id=" + SoundcloudParsingHelper.clientId() +
"&representation=compact";
String response = dl.download(apiUrl);
String response = downloader.download(apiUrl);
try {
playlist = JsonParser.object().from(response);
} catch (JsonParserException e) {

View File

@ -9,7 +9,7 @@ public class SoundcloudPlaylistInfoItemExtractor implements PlaylistInfoItemExtr
private static final String AVATAR_URL_KEY = "avatar_url";
private static final String ARTWORK_URL_KEY = "artwork_url";
private JsonObject searchResult;
private final JsonObject searchResult;
public SoundcloudPlaylistInfoItemExtractor(JsonObject searchResult) {
this.searchResult = searchResult;

View File

@ -21,12 +21,11 @@ public class SoundcloudStreamExtractor extends StreamExtractor {
public SoundcloudStreamExtractor(StreamingService service, String url) throws IOException, ExtractionException {
super(service, url);
fetchPage();
}
@Override
public void fetchPage() throws IOException, ExtractionException {
track = SoundcloudParsingHelper.resolveFor(getOriginalUrl());
public void onFetchPage(@Nonnull Downloader downloader) throws IOException, ExtractionException {
track = SoundcloudParsingHelper.resolveFor(downloader, getOriginalUrl());
String policy = track.getString("policy", "");
if (!policy.equals("ALLOW") && !policy.equals("MONETIZE")) {

View File

@ -146,8 +146,8 @@ public class ItagItem {
return mediaFormat;
}
public int id;
public ItagType itagType;
public final int id;
public final ItagType itagType;
// Audio fields
public int avgBitrate = -1;

View File

@ -59,9 +59,7 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
}
@Override
public void fetchPage() throws IOException, ExtractionException {
Downloader downloader = NewPipe.getDownloader();
public void onFetchPage(@Nonnull Downloader downloader) throws IOException, ExtractionException {
String channelUrl = super.getCleanUrl() + CHANNEL_URL_PARAMETERS;
String pageContent = downloader.download(channelUrl);
doc = Jsoup.parse(pageContent, channelUrl);

View File

@ -26,7 +26,7 @@ import org.schabi.newpipe.extractor.utils.Utils;
*/
public class YoutubeChannelInfoItemExtractor implements ChannelInfoItemExtractor {
private Element el;
private final Element el;
public YoutubeChannelInfoItemExtractor(Element el) {
this.el = el;

View File

@ -36,9 +36,7 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor {
}
@Override
public void fetchPage() throws IOException, ExtractionException {
Downloader downloader = NewPipe.getDownloader();
public void onFetchPage(@Nonnull Downloader downloader) throws IOException, ExtractionException {
String pageContent = downloader.download(getCleanUrl());
doc = Jsoup.parse(pageContent, getCleanUrl());

View File

@ -6,7 +6,7 @@ import org.schabi.newpipe.extractor.playlist.PlaylistInfoItemExtractor;
import org.schabi.newpipe.extractor.utils.Utils;
public class YoutubePlaylistInfoItemExtractor implements PlaylistInfoItemExtractor {
private Element el;
private final Element el;
public YoutubePlaylistInfoItemExtractor(Element el) {
this.el = el;

View File

@ -86,7 +86,6 @@ public class YoutubeStreamExtractor extends StreamExtractor {
public YoutubeStreamExtractor(StreamingService service, String url) throws IOException, ExtractionException {
super(service, url);
fetchPage();
}
/*//////////////////////////////////////////////////////////////////////////
@ -106,6 +105,7 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Nonnull
@Override
public String getName() throws ParsingException {
assertPageFetched();
String name = getStringFromMetaData("title");
if(name == null) {
// Fallback to HTML method
@ -124,6 +124,7 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Nonnull
@Override
public String getUploadDate() throws ParsingException {
assertPageFetched();
try {
return doc.select("meta[itemprop=datePublished]").attr(CONTENT);
} catch (Exception e) {//todo: add fallback method
@ -134,6 +135,7 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Nonnull
@Override
public String getThumbnailUrl() throws ParsingException {
assertPageFetched();
// Try to get high resolution thumbnail first, if it fails, use low res from the player instead
try {
return doc.select("link[itemprop=\"thumbnailUrl\"]").first().attr("abs:href");
@ -157,6 +159,7 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Nonnull
@Override
public String getDescription() throws ParsingException {
assertPageFetched();
try {
return doc.select("p[id=\"eow-description\"]").first().html();
} catch (Exception e) {//todo: add fallback method <-- there is no ... as long as i know
@ -166,6 +169,7 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Override
public int getAgeLimit() throws ParsingException {
assertPageFetched();
if (!isAgeRestricted) {
return NO_AGE_LIMIT;
}
@ -179,6 +183,7 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Override
public long getLength() throws ParsingException {
assertPageFetched();
if(playerArgs != null) {
try {
long returnValue = Long.parseLong(playerArgs.get("length_seconds") + "");
@ -217,6 +222,7 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Override
public long getViewCount() throws ParsingException {
assertPageFetched();
try {
return Long.parseLong(doc.select("meta[itemprop=interactionCount]").attr(CONTENT));
} catch (Exception e) {//todo: find fallback method
@ -226,6 +232,7 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Override
public long getLikeCount() throws ParsingException {
assertPageFetched();
String likesString = "";
try {
Element button = doc.select("button.like-button-renderer-like-button").first();
@ -245,6 +252,7 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Override
public long getDislikeCount() throws ParsingException {
assertPageFetched();
String dislikesString = "";
try {
Element button = doc.select("button.like-button-renderer-dislike-button").first();
@ -265,6 +273,7 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Nonnull
@Override
public String getUploaderUrl() throws ParsingException {
assertPageFetched();
try {
return doc.select("div[class=\"yt-user-info\"]").first().children()
.select("a").first().attr("abs:href");
@ -276,6 +285,7 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Nullable
private String getStringFromMetaData(String field) {
assertPageFetched();
String value = null;
if(playerArgs != null) {
// This can not fail
@ -291,6 +301,7 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Nonnull
@Override
public String getUploaderName() throws ParsingException {
assertPageFetched();
String name = getStringFromMetaData("author");
if(name == null) {
@ -310,6 +321,7 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Nonnull
@Override
public String getUploaderAvatarUrl() throws ParsingException {
assertPageFetched();
try {
return doc.select("a[class*=\"yt-user-photo\"]").first()
.select("img").first()
@ -321,6 +333,7 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Override
public String getDashMpdUrl() throws ParsingException {
assertPageFetched();
try {
String dashManifestUrl;
if (videoInfoPage.containsKey("dashmpd")) {
@ -347,6 +360,7 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Override
public List<AudioStream> getAudioStreams() throws IOException, ExtractionException {
assertPageFetched();
List<AudioStream> audioStreams = new ArrayList<>();
try {
for (Map.Entry<String, ItagItem> entry : getItags(ADAPTIVE_FMTS, ItagItem.ItagType.AUDIO).entrySet()) {
@ -366,6 +380,7 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Override
public List<VideoStream> getVideoStreams() throws IOException, ExtractionException {
assertPageFetched();
List<VideoStream> videoStreams = new ArrayList<>();
try {
for (Map.Entry<String, ItagItem> entry : getItags(URL_ENCODED_FMT_STREAM_MAP, ItagItem.ItagType.VIDEO).entrySet()) {
@ -385,6 +400,7 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Override
public List<VideoStream> getVideoOnlyStreams() throws IOException, ExtractionException {
assertPageFetched();
List<VideoStream> videoOnlyStreams = new ArrayList<>();
try {
for (Map.Entry<String, ItagItem> entry : getItags(ADAPTIVE_FMTS, ItagItem.ItagType.VIDEO_ONLY).entrySet()) {
@ -411,11 +427,13 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Override
@Nullable
public List<Subtitles> getSubtitles(SubtitlesFormat format) throws IOException, ExtractionException {
assertPageFetched();
if(isAgeRestricted) {
// If the video is age restricted getPlayerConfig will fail
return null;
}
JsonObject playerConfig = getPlayerConfig(getPageHtml());
// TODO: This should be done in onFetchPage()
JsonObject playerConfig = getPlayerConfig(getPageHtml(NewPipe.getDownloader()));
String playerResponse = playerConfig.getObject("args").getString("player_response");
JsonObject captions;
@ -459,6 +477,7 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Override
public StreamInfoItem getNextVideo() throws IOException, ExtractionException {
assertPageFetched();
try {
StreamInfoItemCollector collector = new StreamInfoItemCollector(getServiceId());
collector.commit(extractVideoPreviewInfo(doc.select("div[class=\"watch-sidebar-section\"]")
@ -472,6 +491,7 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Override
public StreamInfoItemCollector getRelatedVideos() throws IOException, ExtractionException {
assertPageFetched();
try {
StreamInfoItemCollector collector = new StreamInfoItemCollector(getServiceId());
Element ul = doc.select("ul[id=\"watch-related\"]").first();
@ -526,30 +546,27 @@ public class YoutubeStreamExtractor extends StreamExtractor {
private static final String GET_VIDEO_INFO_URL = "https://www.youtube.com/get_video_info?video_id=" + "%s" +
"&el=info&ps=default&eurl=&gl=US&hl=en";
private static volatile String decryptionCode = "";
private volatile String decryptionCode = "";
private static String pageHtml = null;
private String pageHtml = null;
private String getPageHtml() throws IOException, ExtractionException{
private String getPageHtml(Downloader downloader) throws IOException, ExtractionException{
if (pageHtml == null) {
Downloader dl = NewPipe.getDownloader();
pageHtml = dl.download(getCleanUrl());
pageHtml = downloader.download(getCleanUrl());
}
return pageHtml;
}
@Override
public void fetchPage() throws IOException, ExtractionException {
Downloader dl = NewPipe.getDownloader();
String pageContent = getPageHtml();
public void onFetchPage(@Nonnull Downloader downloader) throws IOException, ExtractionException {
String pageContent = getPageHtml(downloader);
doc = Jsoup.parse(pageContent, getCleanUrl());
String playerUrl;
// Check if the video is age restricted
if (pageContent.contains("<meta property=\"og:restrictions:age")) {
String infoPageResponse = dl.download(String.format(GET_VIDEO_INFO_URL, getId()));
String infoPageResponse = downloader.download(String.format(GET_VIDEO_INFO_URL, getId()));
videoInfoPage.putAll(Parser.compatParseMap(infoPageResponse));
playerUrl = getPlayerUrlFromRestrictedVideo();
isAgeRestricted = true;

View File

@ -42,9 +42,7 @@ public class YoutubeTrendingExtractor extends KioskExtractor {
}
@Override
public void fetchPage() throws IOException, ExtractionException {
Downloader downloader = NewPipe.getDownloader();
public void onFetchPage(@Nonnull Downloader downloader) throws IOException, ExtractionException {
final String contentCountry = getContentCountry();
String url = getCleanUrl();
if(contentCountry != null && !contentCountry.isEmpty()) {

View File

@ -236,7 +236,8 @@ public class StreamInfo extends Info {
* Fills out the video info fields which are common to all services.
* Probably needs to be overridden by subclasses
*/
public static StreamInfo getInfo(StreamExtractor extractor) throws ExtractionException {
private static StreamInfo getInfo(StreamExtractor extractor) throws ExtractionException, IOException {
extractor.fetchPage();
StreamInfo streamInfo;
try {
streamInfo = extractImportantData(extractor);

View File

@ -1,7 +1,5 @@
package org.schabi.newpipe.extractor.stream;
import org.schabi.newpipe.extractor.Subtitles;
public enum SubtitlesFormat {
// YouTube subtitles formats
// TRANSCRIPT(3) is default YT format based on TTML,
@ -13,8 +11,8 @@ public enum SubtitlesFormat {
TRANSCRIPT2 (0x3, "srv2"),
TRANSCRIPT3 (0x4, "srv3");
private int id;
private String extension;
private final int id;
private final String extension;
SubtitlesFormat(int id, String extension) {
this.id = id;

View File

@ -23,8 +23,8 @@ package org.schabi.newpipe.extractor.stream;
import org.schabi.newpipe.extractor.MediaFormat;
public class VideoStream extends Stream {
public String resolution;
public boolean isVideoOnly;
public final String resolution;
public final boolean isVideoOnly;
public VideoStream(String url, MediaFormat format, String resolution) {

View File

@ -9,7 +9,6 @@ import java.io.InputStreamReader;
import java.net.URL;
import java.net.UnknownHostException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

View File

@ -29,6 +29,7 @@ public class SoundcloudStreamExtractorDefaultTest {
public void setUp() throws Exception {
NewPipe.init(Downloader.getInstance());
extractor = (SoundcloudStreamExtractor) SoundCloud.getService().getStreamExtractor("https://soundcloud.com/liluzivert/do-what-i-want-produced-by-maaly-raw-don-cannon");
extractor.fetchPage();
}
@Test

View File

@ -41,12 +41,14 @@ import static org.schabi.newpipe.extractor.ServiceList.YouTube;
*/
public class YoutubeStreamExtractorDefaultTest {
public static final String HTTPS = "https://";
private StreamExtractor extractor;
private YoutubeStreamExtractor extractor;
@Before
public void setUp() throws Exception {
NewPipe.init(Downloader.getInstance());
extractor = YouTube.getService().getStreamExtractor("https://www.youtube.com/watch?v=rYEDA3JcQqw");
extractor = (YoutubeStreamExtractor) YouTube.getService()
.getStreamExtractor("https://www.youtube.com/watch?v=rYEDA3JcQqw");
extractor.fetchPage();
}
@Test

View File

@ -28,6 +28,7 @@ public class YoutubeStreamExtractorRestrictedTest {
NewPipe.init(Downloader.getInstance());
extractor = (YoutubeStreamExtractor) YouTube.getService()
.getStreamExtractor("https://www.youtube.com/watch?v=i6JTvzrpBy0");
extractor.fetchPage();
}
@Test

View File

@ -70,7 +70,7 @@ public class YoutubeTrendingExtractorTest {
public void testGetStreams() throws Exception {
StreamInfoItemCollector collector = extractor.getStreams();
Utils.printErrors(collector);
assertTrue("no streams are received", collector.getItemList().isEmpty());
assertFalse("no streams are received", collector.getItemList().isEmpty());
}
@Test