add tests for searchextractor
This commit is contained in:
parent 06c67763d2
commit 5718d5b8b4
@@ -12,10 +12,10 @@ public abstract class ListUrlIdHandler extends UrlIdHandler {
     public ListUrlIdHandler setQuery(String id,
                                      List<String> contentFilter,
-                                     String softFilter) throws ParsingException {
+                                     String sortFilter) throws ParsingException {
         setId(id);
         this.contentFilter = contentFilter;
-        this.sortFilter = softFilter;
+        this.sortFilter = sortFilter;
         return this;
     }

@@ -88,8 +88,8 @@ public abstract class StreamingService {
     public abstract PlaylistExtractor getPlaylistExtractor(ListUrlIdHandler urlIdHandler) throws ExtractionException;
     public abstract StreamExtractor getStreamExtractor(UrlIdHandler urlIdHandler) throws ExtractionException;

-    public SearchExtractor getSearchExtractor(String query, List<String> contentFilter, String softFilter, String contentCountry) throws ExtractionException {
-        return getSearchExtractor(getSearchQueryHandler().setQuery(query, contentFilter, softFilter), contentCountry);
+    public SearchExtractor getSearchExtractor(String query, List<String> contentFilter, String sortFilter, String contentCountry) throws ExtractionException {
+        return getSearchExtractor(getSearchQueryHandler().setQuery(query, contentFilter, sortFilter), contentCountry);
     }

     public ChannelExtractor getChannelExtractor(String id, List<String> contentFilter, String sortFilter) throws ExtractionException {

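For context, a minimal sketch of how a caller would use the renamed convenience overload after this change. It mirrors the set-up code of the new tests further down in this diff; the query, content filter, and country values are just example inputs, and Downloader is the test downloader used by those tests, not part of the library API:

    // same static imports as the tests: ServiceList.YouTube, Arrays.asList
    NewPipe.init(Downloader.getInstance());

    // "channel" is a content filter; YouTube exposes no sort filters yet, so pass null
    SearchExtractor extractor = YouTube.getSearchExtractor(
            "pewdiepie", asList(new String[]{"channel"}), null, "de");
    extractor.fetchPage();
    ListExtractor.InfoItemsPage<InfoItem> itemsPage = extractor.getInitialPage();
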
@@ -1,7 +1,6 @@
 package org.schabi.newpipe.extractor.search;

 import org.schabi.newpipe.extractor.ListUrlIdHandler;
 import org.schabi.newpipe.extractor.UrlIdHandler;
 import org.schabi.newpipe.extractor.exceptions.ParsingException;

 import java.util.List;

@@ -21,7 +21,7 @@ public class SoundcloudQueryUrlHandler extends SearchQueryUrlHandler {
         try {
             String url = "https://api-v2.soundcloud.com/search";

-            if(getContentFilter().size() > 1) {
+            if(getContentFilter().size() > 0) {
                 switch (getContentFilter().get(0)) {
                     case TRACKS:
                         url += "/tracks";

@@ -66,8 +66,8 @@ public class YoutubeSearchExtractor extends SearchExtractor {
     }

     @Override
-    public String getNextPageUrl() throws IOException, ExtractionException {
-        return getUrl() + "&page=" + Integer.toString( 1);
+    public String getNextPageUrl() throws ExtractionException {
+        return getUrl() + "&page=" + Integer.toString( 2);
     }

     @Override

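The change above makes getNextPageUrl() point at page 2 rather than the page that was already fetched. A short sketch of how the new tests drive pagination, using only accessors that appear elsewhere in this diff (itemsPage is assumed to be the result of getInitialPage()):

    // getNextPageUrl() now ends in "&page=2"
    ListExtractor.InfoItemsPage<InfoItem> secondPage =
            extractor.getPage(itemsPage.getNextPageUrl());
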
@@ -25,13 +25,13 @@ public class YoutubeSearchQueryUrlHandler extends SearchQueryUrlHandler {
         final String url = "https://www.youtube.com/results"
                 + "?q=" + URLEncoder.encode(id, CHARSET_UTF_8);

-        if(getContentFilter().size() > 1) {
+        if(getContentFilter().size() > 0) {
             switch (getContentFilter().get(0)) {
                 case STREAM: return url + "&sp=EgIQAVAU";
                 case CHANNEL: return url + "&sp=EgIQAlAU";
                 case PLAYLIST: return url + "&sp=EgIQA1AU";
                 case ANY:
-                default: return url;
+                default:
             }
         }

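With the corrected size() > 0 check, the first content filter entry now actually selects the &sp= parameter on the results URL. A rough summary of the mapping exercised by YoutubeSearchQUHTest further down ("asdf" is only an example query):

    // "stream"   -> https://www.youtube.com/results?q=asdf&sp=EgIQAVAU
    // "channel"  -> https://www.youtube.com/results?q=asdf&sp=EgIQAlAU
    // "playlist" -> https://www.youtube.com/results?q=asdf&sp=EgIQA1AU
    // "any" or an unknown filter falls through to the plain results URL
    String url = YouTube.getSearchQueryHandler()
            .setQuery("asdf", asList(new String[]{"channel"}), "").getUrl();
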
@@ -1,70 +0,0 @@
-package org.schabi.newpipe.extractor.services.youtube.search;
-
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.schabi.newpipe.Downloader;
-import org.schabi.newpipe.extractor.InfoItem;
-import org.schabi.newpipe.extractor.ListExtractor;
-import org.schabi.newpipe.extractor.NewPipe;
-import org.schabi.newpipe.extractor.channel.ChannelInfoItem;
-import org.schabi.newpipe.extractor.search.SearchEngine;
-import org.schabi.newpipe.extractor.search.SearchExtractor;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.schabi.newpipe.extractor.ServiceList.YouTube;
-
-/*
- * Created by Christian Schabesberger on 29.12.15.
- *
- * Copyright (C) Christian Schabesberger 2015 <chris.schabesberger@mailbox.org>
- * YoutubeSearchExtractorStreamTest.java is part of NewPipe.
- *
- * NewPipe is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * NewPipe is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with NewPipe. If not, see <http://www.gnu.org/licenses/>.
- */
-
-/**
- * Test for {@link SearchEngine}
- */
-public class YoutubeSearchExtractorAllTest {
-
-    private static SearchExtractor extractor;
-    private static ListExtractor.InfoItemsPage<InfoItem> itemsPage;
-
-    @BeforeClass
-    public static void setUpClass() throws Exception {
-        NewPipe.init(Downloader.getInstance());
-        extractor = YouTube.getSearchExtractor("pewdiepie", "de");
-        extractor.fetchPage();
-        itemsPage = extractor.getInitialPage();
-    }
-
-    @Test
-    public void testResultList_FirstElement() {
-        InfoItem firstInfoItem = itemsPage.getItems().get(0);
-
-        // THe channel should be the first item
-        assertTrue(firstInfoItem instanceof ChannelInfoItem);
-        assertEquals("name", "PewDiePie", firstInfoItem.getName());
-        assertEquals("url","https://www.youtube.com/user/PewDiePie", firstInfoItem.getUrl());
-    }
-
-    @Ignore
-    @Test
-    public void testSuggestion() throws Exception {
-        //todo write a real test
-        assertTrue(extractor.getSearchSuggestion() != null);
-    }
-}

@@ -0,0 +1,52 @@
+package org.schabi.newpipe.extractor.services.youtube.search;
+
+import org.junit.Test;
+import org.schabi.newpipe.extractor.InfoItem;
+import org.schabi.newpipe.extractor.ListExtractor;
+import org.schabi.newpipe.extractor.services.BaseListExtractorTest;
+import org.schabi.newpipe.extractor.services.youtube.extractors.YoutubeSearchExtractor;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+
+/*
+ * Created by Christian Schabesberger on 27.05.18
+ *
+ * Copyright (C) Christian Schabesberger 2018 <chris.schabesberger@mailbox.org>
+ * YoutubeSearchExtractorBaseTest.java is part of NewPipe.
+ *
+ * NewPipe is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * NewPipe is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with NewPipe. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**
+ * Test for {@link YoutubeSearchExtractor}
+ */
+public abstract class YoutubeSearchExtractorBaseTest {
+
+    protected static YoutubeSearchExtractor extractor;
+    protected static ListExtractor.InfoItemsPage<InfoItem> itemsPage;
+
+
+    @Test
+    public void testResultListElementsLength() {
+        assertTrue(Integer.toString(itemsPage.getItems().size()),
+                itemsPage.getItems().size() > 10);
+    }
+
+    @Test
+    public void testUrl() throws Exception {
+        assertTrue(extractor.getUrl(), extractor.getUrl().startsWith("https://www.youtube.com"));
+    }
+}

@@ -0,0 +1,62 @@
+package org.schabi.newpipe.extractor.services.youtube.search;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.schabi.newpipe.Downloader;
+import org.schabi.newpipe.extractor.InfoItem;
+import org.schabi.newpipe.extractor.ListExtractor;
+import org.schabi.newpipe.extractor.NewPipe;
+import org.schabi.newpipe.extractor.channel.ChannelInfoItem;
+import org.schabi.newpipe.extractor.services.youtube.extractors.YoutubeSearchExtractor;
+
+import static java.util.Arrays.asList;
+import static org.junit.Assert.*;
+import static org.schabi.newpipe.extractor.ServiceList.YouTube;
+
+public class YoutubeSearchExtractorChannelOnlyTest extends YoutubeSearchExtractorBaseTest {
+
+    @BeforeClass
+    public static void setUpClass() throws Exception {
+        NewPipe.init(Downloader.getInstance());
+        extractor = (YoutubeSearchExtractor) YouTube.getSearchExtractor("pewdiepie",
+                asList(new String[]{"channel"}), null, "de");
+        extractor.fetchPage();
+        itemsPage = extractor.getInitialPage();
+    }
+
+    @Test
+    public void testGetSecondPage() throws Exception {
+        YoutubeSearchExtractor secondExtractor = (YoutubeSearchExtractor) YouTube.getSearchExtractor("pewdiepie",
+                asList(new String[]{"channel"}), null, "de");
+        ListExtractor.InfoItemsPage<InfoItem> secondPage = secondExtractor.getPage(itemsPage.getNextPageUrl());
+        assertTrue(Integer.toString(secondPage.getItems().size()),
+                secondPage.getItems().size() > 10);
+
+        // check if it's the same result
+        boolean equals = true;
+        for (int i = 0; i < secondPage.getItems().size()
+                && i < itemsPage.getItems().size(); i++) {
+            if(!secondPage.getItems().get(i).getUrl().equals(
+                    itemsPage.getItems().get(i).getUrl())) {
+                equals = false;
+            }
+        }
+        assertFalse("First and second page are equal", equals);
+
+        assertEquals("https://www.youtube.com/results?q=pewdiepie&sp=EgIQAlAU&page=3", secondPage.getNextPageUrl());
+    }
+
+    @Test
+    public void testGetSecondPageUrl() throws Exception {
+        assertEquals("https://www.youtube.com/results?q=pewdiepie&sp=EgIQAlAU&page=2", extractor.getNextPageUrl());
+    }
+
+    @Test
+    public void testOnlyContainChannels() {
+        for(InfoItem item : itemsPage.getItems()) {
+            if(!(item instanceof ChannelInfoItem)) {
+                fail("The following item is no channel item: " + item.toString());
+            }
+        }
+    }
+}

@@ -0,0 +1,117 @@
+package org.schabi.newpipe.extractor.services.youtube.search;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.schabi.newpipe.Downloader;
+import org.schabi.newpipe.extractor.InfoItem;
+import org.schabi.newpipe.extractor.ListExtractor;
+import org.schabi.newpipe.extractor.NewPipe;
+import org.schabi.newpipe.extractor.channel.ChannelInfoItem;
+import org.schabi.newpipe.extractor.services.youtube.extractors.YoutubeSearchExtractor;
+import org.schabi.newpipe.extractor.stream.StreamInfoItem;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.schabi.newpipe.extractor.ServiceList.YouTube;
+
+/*
+ * Created by Christian Schabesberger on 27.05.18
+ *
+ * Copyright (C) Christian Schabesberger 2018 <chris.schabesberger@mailbox.org>
+ * YoutubeSearchExtractorStreamTest.java is part of NewPipe.
+ *
+ * NewPipe is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * NewPipe is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with NewPipe. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**
+ * Test for {@link YoutubeSearchExtractor}
+ */
+public class YoutubeSearchExtractorDefaultTest extends YoutubeSearchExtractorBaseTest {
+
+    @BeforeClass
+    public static void setUpClass() throws Exception {
+        NewPipe.init(Downloader.getInstance());
+        extractor = (YoutubeSearchExtractor) YouTube.getSearchExtractor("pewdiepie", "de");
+        extractor.fetchPage();
+        itemsPage = extractor.getInitialPage();
+    }
+
+
+    @Test
+    public void testGetSecondPageUrl() throws Exception {
+        assertEquals(extractor.getNextPageUrl(), "https://www.youtube.com/results?q=pewdiepie&page=2");
+    }
+
+    @Test
+    public void testResultList_FirstElement() {
+        InfoItem firstInfoItem = itemsPage.getItems().get(0);
+
+        // The channel should be the first item
+        assertTrue(firstInfoItem instanceof ChannelInfoItem);
+        assertEquals("name", "PewDiePie", firstInfoItem.getName());
+        assertEquals("url","https://www.youtube.com/user/PewDiePie", firstInfoItem.getUrl());
+    }
+
+    @Test
+    public void testResultListCheckIfContainsStreamItems() {
+        boolean hasStreams = false;
+        for(InfoItem item : itemsPage.getItems()) {
+            if(item instanceof StreamInfoItem) {
+                hasStreams = true;
+            }
+        }
+        assertTrue("Has no InfoItemStreams", hasStreams);
+    }
+
+    @Test
+    public void testGetSecondPage() throws Exception {
+        YoutubeSearchExtractor secondExtractor =
+                (YoutubeSearchExtractor) YouTube.getSearchExtractor("pewdiepie", "de");
+        ListExtractor.InfoItemsPage<InfoItem> secondPage = secondExtractor.getPage(itemsPage.getNextPageUrl());
+        assertTrue(Integer.toString(secondPage.getItems().size()),
+                secondPage.getItems().size() > 10);
+
+        // check if it's the same result
+        boolean equals = true;
+        for (int i = 0; i < secondPage.getItems().size()
+                && i < itemsPage.getItems().size(); i++) {
+            if(!secondPage.getItems().get(i).getUrl().equals(
+                    itemsPage.getItems().get(i).getUrl())) {
+                equals = false;
+            }
+        }
+        assertFalse("First and second page are equal", equals);
+
+        assertEquals("https://www.youtube.com/results?q=pewdiepie&page=3", secondPage.getNextPageUrl());
+    }
+
+    @Test
+    public void testSuggestionNotNull() throws Exception {
+        //todo write a real test
+        assertTrue(extractor.getSearchSuggestion() != null);
+    }
+
+
+    @Test
+    public void testId() throws Exception {
+        assertEquals(extractor.getId(), "pewdiepie");
+    }
+
+    @Test
+    public void testName() {
+        assertEquals(extractor.getName(), "pewdiepie");
+    }
+}

@@ -0,0 +1,57 @@
+package org.schabi.newpipe.extractor.services.youtube.search;
+
+import org.junit.Test;
+
+import java.util.ArrayList;
+
+import static java.util.Arrays.asList;
+import static org.junit.Assert.assertEquals;
+import static org.schabi.newpipe.extractor.ServiceList.YouTube;
+
+public class YoutubeSearchQUHTest {
+
+    @Test
+    public void testRegularValues() throws Exception {
+        assertEquals("https://www.youtube.com/results?q=asdf", YouTube.getSearchQueryHandler().setQuery("asdf").getUrl());
+        assertEquals("https://www.youtube.com/results?q=hans",YouTube.getSearchQueryHandler().setQuery("hans").getUrl());
+        assertEquals("https://www.youtube.com/results?q=Poifj%26jaijf", YouTube.getSearchQueryHandler().setQuery("Poifj&jaijf").getUrl());
+        assertEquals("https://www.youtube.com/results?q=G%C3%BCl%C3%BCm", YouTube.getSearchQueryHandler().setQuery("Gülüm").getUrl());
+        assertEquals("https://www.youtube.com/results?q=%3Fj%24%29H%C2%A7B", YouTube.getSearchQueryHandler().setQuery("?j$)H§B").getUrl());
+    }
+
+    @Test
+    public void testGetContentFilter() throws Exception {
+        assertEquals("stream", YouTube.getSearchQueryHandler()
+                .setQuery("", asList(new String[]{"stream"}), "").getContentFilter().get(0));
+        assertEquals("channel", YouTube.getSearchQueryHandler()
+                .setQuery("asdf", asList(new String[]{"channel"}), "").getContentFilter().get(0));
+    }
+
+    @Test
+    public void testWithContentfilter() throws Exception {
+        assertEquals("https://www.youtube.com/results?q=asdf&sp=EgIQAVAU", YouTube.getSearchQueryHandler()
+                .setQuery("asdf", asList(new String[]{"stream"}), "").getUrl());
+        assertEquals("https://www.youtube.com/results?q=asdf&sp=EgIQAlAU", YouTube.getSearchQueryHandler()
+                .setQuery("asdf", asList(new String[]{"channel"}), "").getUrl());
+        assertEquals("https://www.youtube.com/results?q=asdf&sp=EgIQA1AU", YouTube.getSearchQueryHandler()
+                .setQuery("asdf", asList(new String[]{"playlist"}), "").getUrl());
+        assertEquals("https://www.youtube.com/results?q=asdf", YouTube.getSearchQueryHandler()
+                .setQuery("asdf", asList(new String[]{"fjiijie"}), "").getUrl());
+    }
+
+    @Test
+    public void testGetAvailableContentFilter() {
+        final String[] contentFilter = YouTube.getSearchQueryHandler().getAvailableContentFilter();
+        assertEquals(4, contentFilter.length);
+        assertEquals("stream", contentFilter[0]);
+        assertEquals("channel", contentFilter[1]);
+        assertEquals("playlist", contentFilter[2]);
+        assertEquals("any", contentFilter[3]);
+    }
+
+    @Test
+    public void testGetAvailableSortFilter() {
+        final String[] contentFilter = YouTube.getSearchQueryHandler().getAvailableSortFilter();
+        assertEquals(0, contentFilter.length);
+    }
+}