Improve search extractor tests for services
This commit is contained in:
parent
d72130edae
commit
9704fc9952
|
@ -0,0 +1,7 @@
|
||||||
|
package org.schabi.newpipe.extractor.services;
|
||||||
|
|
||||||
|
@SuppressWarnings("unused")
|
||||||
|
public interface BaseSearchExtractorTest extends BaseListExtractorTest {
|
||||||
|
void testSearchString() throws Exception;
|
||||||
|
void testSearchSuggestion() throws Exception;
|
||||||
|
}
|
|
@ -0,0 +1,59 @@
|
||||||
|
package org.schabi.newpipe.extractor.services;
|
||||||
|
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.schabi.newpipe.extractor.Extractor;
|
||||||
|
import org.schabi.newpipe.extractor.StreamingService;
|
||||||
|
|
||||||
|
import static org.junit.Assert.assertEquals;
|
||||||
|
import static org.junit.Assert.assertTrue;
|
||||||
|
import static org.schabi.newpipe.extractor.ExtractorAsserts.assertIsSecureUrl;
|
||||||
|
|
||||||
|
public abstract class DefaultExtractorTest<T extends Extractor> implements BaseExtractorTest {
|
||||||
|
public abstract T extractor() throws Exception;
|
||||||
|
|
||||||
|
public abstract StreamingService expectedService() throws Exception;
|
||||||
|
public abstract String expectedName() throws Exception;
|
||||||
|
public abstract String expectedId() throws Exception;
|
||||||
|
public abstract String expectedUrlContains() throws Exception;
|
||||||
|
public abstract String expectedOriginalUrlContains() throws Exception;
|
||||||
|
|
||||||
|
@Test
|
||||||
|
@Override
|
||||||
|
public void testServiceId() throws Exception {
|
||||||
|
assertEquals(expectedService().getServiceId(), extractor().getServiceId());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
@Override
|
||||||
|
public void testName() throws Exception {
|
||||||
|
assertEquals(expectedName(), extractor().getName());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
@Override
|
||||||
|
public void testId() throws Exception {
|
||||||
|
assertEquals(expectedId(), extractor().getId());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
@Override
|
||||||
|
public void testUrl() throws Exception {
|
||||||
|
final String url = extractor().getUrl();
|
||||||
|
final String expectedContains = expectedUrlContains();
|
||||||
|
|
||||||
|
assertIsSecureUrl(url);
|
||||||
|
assertTrue("Url \"" + url + "\" doesn't contains \"" + expectedContains + "\"",
|
||||||
|
url.contains(expectedContains));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
@Override
|
||||||
|
public void testOriginalUrl() throws Exception {
|
||||||
|
final String originalUrl = extractor().getOriginalUrl();
|
||||||
|
final String expectedContains = expectedOriginalUrlContains();
|
||||||
|
|
||||||
|
assertIsSecureUrl(originalUrl);
|
||||||
|
assertTrue("Original url \"" + originalUrl + "\" doesn't contains \"" + expectedContains + "\"",
|
||||||
|
originalUrl.contains(expectedContains));
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,50 @@
|
||||||
|
package org.schabi.newpipe.extractor.services;
|
||||||
|
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.schabi.newpipe.extractor.InfoItem;
|
||||||
|
import org.schabi.newpipe.extractor.ListExtractor;
|
||||||
|
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
import static org.schabi.newpipe.extractor.services.DefaultTests.*;
|
||||||
|
|
||||||
|
public abstract class DefaultListExtractorTest<T extends ListExtractor<? extends InfoItem>> extends DefaultExtractorTest<T>
|
||||||
|
implements BaseListExtractorTest {
|
||||||
|
|
||||||
|
@Nullable
|
||||||
|
public InfoItem.InfoType expectedInfoItemType() {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
public boolean expectedHasMoreItems() {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
@Override
|
||||||
|
public void testRelatedItems() throws Exception {
|
||||||
|
final ListExtractor<? extends InfoItem> extractor = extractor();
|
||||||
|
|
||||||
|
final InfoItem.InfoType expectedType = expectedInfoItemType();
|
||||||
|
final ListExtractor.InfoItemsPage<? extends InfoItem> items = defaultTestRelatedItems(extractor);
|
||||||
|
if (expectedType != null) {
|
||||||
|
assertOnlyContainsType(items, expectedType);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
@Override
|
||||||
|
public void testMoreRelatedItems() throws Exception {
|
||||||
|
final ListExtractor<? extends InfoItem> extractor = extractor();
|
||||||
|
|
||||||
|
if (expectedHasMoreItems()) {
|
||||||
|
final InfoItem.InfoType expectedType = expectedInfoItemType();
|
||||||
|
final ListExtractor.InfoItemsPage<? extends InfoItem> items = defaultTestMoreItems(extractor);
|
||||||
|
if (expectedType != null) {
|
||||||
|
assertOnlyContainsType(items, expectedType);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
assertNoMoreItems(extractor);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,34 @@
|
||||||
|
package org.schabi.newpipe.extractor.services;
|
||||||
|
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.schabi.newpipe.extractor.search.SearchExtractor;
|
||||||
|
|
||||||
|
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
import static org.junit.Assert.assertEquals;
|
||||||
|
import static org.schabi.newpipe.extractor.ExtractorAsserts.assertEmpty;
|
||||||
|
|
||||||
|
public abstract class DefaultSearchExtractorTest extends DefaultListExtractorTest<SearchExtractor>
|
||||||
|
implements BaseSearchExtractorTest {
|
||||||
|
|
||||||
|
public abstract String expectedSearchString();
|
||||||
|
@Nullable public abstract String expectedSearchSuggestion();
|
||||||
|
|
||||||
|
@Test
|
||||||
|
@Override
|
||||||
|
public void testSearchString() throws Exception {
|
||||||
|
assertEquals(expectedSearchString(), extractor().getSearchString());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
@Override
|
||||||
|
public void testSearchSuggestion() throws Exception {
|
||||||
|
final String expectedSearchSuggestion = expectedSearchSuggestion();
|
||||||
|
if (expectedSearchSuggestion == null || expectedSearchSuggestion.isEmpty()) {
|
||||||
|
assertEmpty("Suggestion was expected to be empty", extractor().getSearchSuggestion());
|
||||||
|
} else {
|
||||||
|
assertEquals(expectedSearchSuggestion, extractor().getSearchSuggestion());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -2,23 +2,22 @@ package org.schabi.newpipe.extractor.services;
|
||||||
|
|
||||||
import org.schabi.newpipe.extractor.InfoItem;
|
import org.schabi.newpipe.extractor.InfoItem;
|
||||||
import org.schabi.newpipe.extractor.ListExtractor;
|
import org.schabi.newpipe.extractor.ListExtractor;
|
||||||
import org.schabi.newpipe.extractor.NewPipe;
|
|
||||||
import org.schabi.newpipe.extractor.StreamingService;
|
import org.schabi.newpipe.extractor.StreamingService;
|
||||||
import org.schabi.newpipe.extractor.channel.ChannelInfoItem;
|
import org.schabi.newpipe.extractor.channel.ChannelInfoItem;
|
||||||
import org.schabi.newpipe.extractor.exceptions.ParsingException;
|
import org.schabi.newpipe.extractor.exceptions.ParsingException;
|
||||||
import org.schabi.newpipe.extractor.linkhandler.LinkHandlerFactory;
|
|
||||||
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandlerFactory;
|
|
||||||
import org.schabi.newpipe.extractor.localization.DateWrapper;
|
import org.schabi.newpipe.extractor.localization.DateWrapper;
|
||||||
import org.schabi.newpipe.extractor.playlist.PlaylistInfoItem;
|
import org.schabi.newpipe.extractor.playlist.PlaylistInfoItem;
|
||||||
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
|
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
|
||||||
|
|
||||||
import java.util.Calendar;
|
import java.util.Calendar;
|
||||||
|
import java.util.HashSet;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
import java.util.Set;
|
||||||
|
|
||||||
import static junit.framework.TestCase.assertFalse;
|
import static junit.framework.TestCase.assertFalse;
|
||||||
import static org.junit.Assert.*;
|
import static org.junit.Assert.*;
|
||||||
import static org.schabi.newpipe.extractor.ExtractorAsserts.*;
|
import static org.schabi.newpipe.extractor.ExtractorAsserts.*;
|
||||||
import static org.schabi.newpipe.extractor.StreamingService.*;
|
import static org.schabi.newpipe.extractor.StreamingService.LinkType;
|
||||||
|
|
||||||
public final class DefaultTests {
|
public final class DefaultTests {
|
||||||
public static void defaultTestListOfItems(StreamingService expectedService, List<? extends InfoItem> itemsList, List<Throwable> errors) throws ParsingException {
|
public static void defaultTestListOfItems(StreamingService expectedService, List<? extends InfoItem> itemsList, List<Throwable> errors) throws ParsingException {
|
||||||
|
@ -71,12 +70,38 @@ public final class DefaultTests {
|
||||||
expectedLinkType, linkTypeByUrl);
|
expectedLinkType, linkTypeByUrl);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public static void assertOnlyContainsType(ListExtractor.InfoItemsPage<? extends InfoItem> items, InfoItem.InfoType expectedType) {
|
||||||
|
for (InfoItem item : items.getItems()) {
|
||||||
|
assertEquals("Item list contains unexpected info types",
|
||||||
|
expectedType, item.getInfoType());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
public static <T extends InfoItem> void assertNoMoreItems(ListExtractor<T> extractor) throws Exception {
|
public static <T extends InfoItem> void assertNoMoreItems(ListExtractor<T> extractor) throws Exception {
|
||||||
assertFalse("More items available when it shouldn't", extractor.hasNextPage());
|
assertFalse("More items available when it shouldn't", extractor.hasNextPage());
|
||||||
final String nextPageUrl = extractor.getNextPageUrl();
|
final String nextPageUrl = extractor.getNextPageUrl();
|
||||||
assertTrue("Next page is not empty or null", nextPageUrl == null || nextPageUrl.isEmpty());
|
assertTrue("Next page is not empty or null", nextPageUrl == null || nextPageUrl.isEmpty());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public static void assertNoDuplicatedItems(StreamingService expectedService,
|
||||||
|
ListExtractor.InfoItemsPage<InfoItem> page1,
|
||||||
|
ListExtractor.InfoItemsPage<InfoItem> page2) throws Exception {
|
||||||
|
defaultTestListOfItems(expectedService, page1.getItems(), page1.getErrors());
|
||||||
|
defaultTestListOfItems(expectedService, page2.getItems(), page2.getErrors());
|
||||||
|
|
||||||
|
final Set<String> urlsSet = new HashSet<>();
|
||||||
|
for (InfoItem item : page1.getItems()) {
|
||||||
|
urlsSet.add(item.getUrl());
|
||||||
|
}
|
||||||
|
|
||||||
|
for (InfoItem item : page2.getItems()) {
|
||||||
|
final boolean wasAdded = urlsSet.add(item.getUrl());
|
||||||
|
if (!wasAdded) {
|
||||||
|
fail("Same item was on the first and second page item list");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
public static <T extends InfoItem> ListExtractor.InfoItemsPage<T> defaultTestRelatedItems(ListExtractor<T> extractor) throws Exception {
|
public static <T extends InfoItem> ListExtractor.InfoItemsPage<T> defaultTestRelatedItems(ListExtractor<T> extractor) throws Exception {
|
||||||
final ListExtractor.InfoItemsPage<T> page = extractor.getInitialPage();
|
final ListExtractor.InfoItemsPage<T> page = extractor.getInitialPage();
|
||||||
final List<T> itemsList = page.getItems();
|
final List<T> itemsList = page.getItems();
|
||||||
|
|
|
@ -1,58 +0,0 @@
|
||||||
package org.schabi.newpipe.extractor.services.media_ccc;
|
|
||||||
|
|
||||||
import org.junit.BeforeClass;
|
|
||||||
import org.junit.Test;
|
|
||||||
import org.schabi.newpipe.DownloaderTestImpl;
|
|
||||||
import org.schabi.newpipe.extractor.InfoItem;
|
|
||||||
import org.schabi.newpipe.extractor.ListExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.NewPipe;
|
|
||||||
import org.schabi.newpipe.extractor.channel.ChannelInfoItem;
|
|
||||||
import org.schabi.newpipe.extractor.search.SearchExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.services.media_ccc.extractors.MediaCCCSearchExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.services.media_ccc.linkHandler.MediaCCCSearchQueryHandlerFactory;
|
|
||||||
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
|
|
||||||
|
|
||||||
import java.util.Arrays;
|
|
||||||
|
|
||||||
import static junit.framework.TestCase.assertTrue;
|
|
||||||
import static org.schabi.newpipe.extractor.ServiceList.MediaCCC;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Test for {@link MediaCCCSearchExtractor}
|
|
||||||
*/
|
|
||||||
public class MediaCCCSearchExtractorAllTest {
|
|
||||||
|
|
||||||
private static SearchExtractor extractor;
|
|
||||||
private static ListExtractor.InfoItemsPage<InfoItem> itemsPage;
|
|
||||||
|
|
||||||
@BeforeClass
|
|
||||||
public static void setUpClass() throws Exception {
|
|
||||||
NewPipe.init(DownloaderTestImpl.getInstance());
|
|
||||||
extractor = MediaCCC.getSearchExtractor(new MediaCCCSearchQueryHandlerFactory()
|
|
||||||
.fromQuery("c3", Arrays.asList(new String[0]), ""));
|
|
||||||
extractor.fetchPage();
|
|
||||||
itemsPage = extractor.getInitialPage();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testIfChannelInfoItemsAvailable() {
|
|
||||||
boolean isAvialable = false;
|
|
||||||
for (InfoItem item : itemsPage.getItems()) {
|
|
||||||
if (item instanceof ChannelInfoItem) {
|
|
||||||
isAvialable = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assertTrue("ChannelInfoItem not in all list", isAvialable);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testIfStreamInfoitemsAvailable() {
|
|
||||||
boolean isAvialable = false;
|
|
||||||
for (InfoItem item : itemsPage.getItems()) {
|
|
||||||
if (item instanceof StreamInfoItem) {
|
|
||||||
isAvialable = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assertTrue("ChannelInfoItem not in all list", isAvialable);
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,48 +0,0 @@
|
||||||
package org.schabi.newpipe.extractor.services.media_ccc;
|
|
||||||
|
|
||||||
import org.junit.BeforeClass;
|
|
||||||
import org.junit.Test;
|
|
||||||
import org.schabi.newpipe.DownloaderTestImpl;
|
|
||||||
import org.schabi.newpipe.extractor.InfoItem;
|
|
||||||
import org.schabi.newpipe.extractor.ListExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.NewPipe;
|
|
||||||
import org.schabi.newpipe.extractor.channel.ChannelInfoItem;
|
|
||||||
import org.schabi.newpipe.extractor.search.SearchExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.services.media_ccc.extractors.MediaCCCSearchExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.services.media_ccc.linkHandler.MediaCCCSearchQueryHandlerFactory;
|
|
||||||
|
|
||||||
import java.util.Arrays;
|
|
||||||
|
|
||||||
import static junit.framework.TestCase.assertTrue;
|
|
||||||
import static org.schabi.newpipe.extractor.ServiceList.MediaCCC;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Test for {@link MediaCCCSearchExtractor}
|
|
||||||
*/
|
|
||||||
public class MediaCCCSearchExtractorConferencesTest {
|
|
||||||
|
|
||||||
private static SearchExtractor extractor;
|
|
||||||
private static ListExtractor.InfoItemsPage<InfoItem> itemsPage;
|
|
||||||
|
|
||||||
@BeforeClass
|
|
||||||
public static void setUpClass() throws Exception {
|
|
||||||
NewPipe.init(DownloaderTestImpl.getInstance());
|
|
||||||
extractor = MediaCCC.getSearchExtractor(new MediaCCCSearchQueryHandlerFactory()
|
|
||||||
.fromQuery("c3", Arrays.asList(new String[]{"conferences"}), ""));
|
|
||||||
extractor.fetchPage();
|
|
||||||
itemsPage = extractor.getInitialPage();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testReturnTypeChannel() {
|
|
||||||
for (InfoItem item : itemsPage.getItems()) {
|
|
||||||
assertTrue("Item is not of type channel", item instanceof ChannelInfoItem);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testItemCount() {
|
|
||||||
assertTrue("Count is to hight: " + itemsPage.getItems().size(), itemsPage.getItems().size() < 127);
|
|
||||||
assertTrue("Countis to low: " + itemsPage.getItems().size(), itemsPage.getItems().size() >= 29);
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,72 +0,0 @@
|
||||||
package org.schabi.newpipe.extractor.services.media_ccc;
|
|
||||||
|
|
||||||
import org.junit.BeforeClass;
|
|
||||||
import org.junit.Test;
|
|
||||||
import org.schabi.newpipe.DownloaderTestImpl;
|
|
||||||
import org.schabi.newpipe.extractor.InfoItem;
|
|
||||||
import org.schabi.newpipe.extractor.ListExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.NewPipe;
|
|
||||||
import org.schabi.newpipe.extractor.search.SearchExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.services.media_ccc.extractors.MediaCCCSearchExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.services.media_ccc.linkHandler.MediaCCCSearchQueryHandlerFactory;
|
|
||||||
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
|
|
||||||
|
|
||||||
import java.util.Arrays;
|
|
||||||
|
|
||||||
import static junit.framework.TestCase.assertEquals;
|
|
||||||
import static junit.framework.TestCase.assertTrue;
|
|
||||||
import static org.junit.Assert.assertFalse;
|
|
||||||
import static org.schabi.newpipe.extractor.ServiceList.MediaCCC;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Test for {@link MediaCCCSearchExtractor}
|
|
||||||
*/
|
|
||||||
public class MediaCCCSearchExtractorEventsTest {
|
|
||||||
private static SearchExtractor extractor;
|
|
||||||
private static ListExtractor.InfoItemsPage<InfoItem> itemsPage;
|
|
||||||
|
|
||||||
@BeforeClass
|
|
||||||
public static void setUpClass() throws Exception {
|
|
||||||
NewPipe.init(DownloaderTestImpl.getInstance());
|
|
||||||
extractor = MediaCCC.getSearchExtractor(new MediaCCCSearchQueryHandlerFactory()
|
|
||||||
.fromQuery("linux", Arrays.asList(new String[]{"events"}), ""));
|
|
||||||
extractor.fetchPage();
|
|
||||||
itemsPage = extractor.getInitialPage();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testCount() throws Exception {
|
|
||||||
assertTrue(Integer.toString(itemsPage.getItems().size()),
|
|
||||||
itemsPage.getItems().size() >= 25);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testServiceId() throws Exception {
|
|
||||||
assertEquals(2, extractor.getServiceId());
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testName() throws Exception {
|
|
||||||
assertFalse(itemsPage.getItems().get(0).getName(), itemsPage.getItems().get(0).getName().isEmpty());
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testUrl() throws Exception {
|
|
||||||
assertTrue("Url should start with: https://api.media.ccc.de/public/events/",
|
|
||||||
itemsPage.getItems().get(0).getUrl().startsWith("https://api.media.ccc.de/public/events/"));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testThumbnailUrl() throws Exception {
|
|
||||||
assertTrue(itemsPage.getItems().get(0).getThumbnailUrl(),
|
|
||||||
itemsPage.getItems().get(0).getThumbnailUrl().startsWith("https://static.media.ccc.de/media/")
|
|
||||||
&& itemsPage.getItems().get(0).getThumbnailUrl().endsWith(".jpg"));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testReturnTypeStream() throws Exception {
|
|
||||||
for (InfoItem item : itemsPage.getItems()) {
|
|
||||||
assertTrue("Item is not of type StreamInfoItem", item instanceof StreamInfoItem);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -0,0 +1,90 @@
|
||||||
|
package org.schabi.newpipe.extractor.services.media_ccc.search;
|
||||||
|
|
||||||
|
import org.junit.BeforeClass;
|
||||||
|
import org.schabi.newpipe.DownloaderTestImpl;
|
||||||
|
import org.schabi.newpipe.extractor.InfoItem;
|
||||||
|
import org.schabi.newpipe.extractor.NewPipe;
|
||||||
|
import org.schabi.newpipe.extractor.StreamingService;
|
||||||
|
import org.schabi.newpipe.extractor.search.SearchExtractor;
|
||||||
|
import org.schabi.newpipe.extractor.services.DefaultSearchExtractorTest;
|
||||||
|
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
import static java.util.Collections.singletonList;
|
||||||
|
import static org.schabi.newpipe.extractor.ServiceList.MediaCCC;
|
||||||
|
import static org.schabi.newpipe.extractor.services.media_ccc.linkHandler.MediaCCCSearchQueryHandlerFactory.CONFERENCES;
|
||||||
|
import static org.schabi.newpipe.extractor.services.media_ccc.linkHandler.MediaCCCSearchQueryHandlerFactory.EVENTS;
|
||||||
|
|
||||||
|
public class MediaCCCSearchExtractorTest {
|
||||||
|
|
||||||
|
public static class All extends DefaultSearchExtractorTest {
|
||||||
|
private static SearchExtractor extractor;
|
||||||
|
private static final String QUERY = "kde";
|
||||||
|
|
||||||
|
@BeforeClass
|
||||||
|
public static void setUp() throws Exception {
|
||||||
|
NewPipe.init(DownloaderTestImpl.getInstance());
|
||||||
|
extractor = MediaCCC.getSearchExtractor(QUERY);
|
||||||
|
extractor.fetchPage();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override public SearchExtractor extractor() { return extractor; }
|
||||||
|
@Override public StreamingService expectedService() { return MediaCCC; }
|
||||||
|
@Override public String expectedName() { return QUERY; }
|
||||||
|
@Override public String expectedId() { return QUERY; }
|
||||||
|
@Override public String expectedUrlContains() { return "api.media.ccc.de/public/events/search?q=" + QUERY; }
|
||||||
|
@Override public String expectedOriginalUrlContains() { return "api.media.ccc.de/public/events/search?q=" + QUERY; }
|
||||||
|
@Override public String expectedSearchString() { return QUERY; }
|
||||||
|
@Nullable @Override public String expectedSearchSuggestion() { return null; }
|
||||||
|
|
||||||
|
@Override public boolean expectedHasMoreItems() { return false; }
|
||||||
|
}
|
||||||
|
|
||||||
|
public static class Conferences extends DefaultSearchExtractorTest {
|
||||||
|
private static SearchExtractor extractor;
|
||||||
|
private static final String QUERY = "c3";
|
||||||
|
|
||||||
|
@BeforeClass
|
||||||
|
public static void setUp() throws Exception {
|
||||||
|
NewPipe.init(DownloaderTestImpl.getInstance());
|
||||||
|
extractor = MediaCCC.getSearchExtractor(QUERY, singletonList(CONFERENCES), "");
|
||||||
|
extractor.fetchPage();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override public SearchExtractor extractor() { return extractor; }
|
||||||
|
@Override public StreamingService expectedService() { return MediaCCC; }
|
||||||
|
@Override public String expectedName() { return QUERY; }
|
||||||
|
@Override public String expectedId() { return QUERY; }
|
||||||
|
@Override public String expectedUrlContains() { return "api.media.ccc.de/public/events/search?q=" + QUERY; }
|
||||||
|
@Override public String expectedOriginalUrlContains() { return "api.media.ccc.de/public/events/search?q=" + QUERY; }
|
||||||
|
@Override public String expectedSearchString() { return QUERY; }
|
||||||
|
@Nullable @Override public String expectedSearchSuggestion() { return null; }
|
||||||
|
|
||||||
|
@Nullable @Override public InfoItem.InfoType expectedInfoItemType() { return InfoItem.InfoType.CHANNEL; }
|
||||||
|
@Override public boolean expectedHasMoreItems() { return false; }
|
||||||
|
}
|
||||||
|
|
||||||
|
public static class Events extends DefaultSearchExtractorTest {
|
||||||
|
private static SearchExtractor extractor;
|
||||||
|
private static final String QUERY = "linux";
|
||||||
|
|
||||||
|
@BeforeClass
|
||||||
|
public static void setUp() throws Exception {
|
||||||
|
NewPipe.init(DownloaderTestImpl.getInstance());
|
||||||
|
extractor = MediaCCC.getSearchExtractor(QUERY, singletonList(EVENTS), "");
|
||||||
|
extractor.fetchPage();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override public SearchExtractor extractor() { return extractor; }
|
||||||
|
@Override public StreamingService expectedService() { return MediaCCC; }
|
||||||
|
@Override public String expectedName() { return QUERY; }
|
||||||
|
@Override public String expectedId() { return QUERY; }
|
||||||
|
@Override public String expectedUrlContains() { return "api.media.ccc.de/public/events/search?q=" + QUERY; }
|
||||||
|
@Override public String expectedOriginalUrlContains() { return "api.media.ccc.de/public/events/search?q=" + QUERY; }
|
||||||
|
@Override public String expectedSearchString() { return QUERY; }
|
||||||
|
@Nullable @Override public String expectedSearchSuggestion() { return null; }
|
||||||
|
|
||||||
|
@Nullable @Override public InfoItem.InfoType expectedInfoItemType() { return InfoItem.InfoType.STREAM; }
|
||||||
|
@Override public boolean expectedHasMoreItems() { return false; }
|
||||||
|
}
|
||||||
|
}
|
|
@ -1,28 +0,0 @@
|
||||||
package org.schabi.newpipe.extractor.services.peertube.search;
|
|
||||||
|
|
||||||
import org.junit.Test;
|
|
||||||
import org.schabi.newpipe.extractor.InfoItem;
|
|
||||||
import org.schabi.newpipe.extractor.ListExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.services.peertube.extractors.PeertubeSearchExtractor;
|
|
||||||
|
|
||||||
import static org.junit.Assert.assertTrue;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Test for {@link PeertubeSearchExtractor}
|
|
||||||
*/
|
|
||||||
public abstract class PeertubeSearchExtractorBaseTest {
|
|
||||||
|
|
||||||
protected static PeertubeSearchExtractor extractor;
|
|
||||||
protected static ListExtractor.InfoItemsPage<InfoItem> itemsPage;
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testResultListElementsLength() {
|
|
||||||
assertTrue(Integer.toString(itemsPage.getItems().size()),
|
|
||||||
itemsPage.getItems().size() >= 3);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testUrl() throws Exception {
|
|
||||||
assertTrue(extractor.getUrl(), extractor.getUrl().startsWith("https://peertube.mastodon.host/api/v1/search/videos"));
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,89 +0,0 @@
|
||||||
package org.schabi.newpipe.extractor.services.peertube.search;
|
|
||||||
|
|
||||||
import org.junit.BeforeClass;
|
|
||||||
import org.junit.Test;
|
|
||||||
import org.schabi.newpipe.DownloaderTestImpl;
|
|
||||||
import org.schabi.newpipe.extractor.InfoItem;
|
|
||||||
import org.schabi.newpipe.extractor.ListExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.NewPipe;
|
|
||||||
import org.schabi.newpipe.extractor.services.peertube.PeertubeInstance;
|
|
||||||
import org.schabi.newpipe.extractor.services.peertube.extractors.PeertubeSearchExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
|
|
||||||
|
|
||||||
import static org.junit.Assert.*;
|
|
||||||
import static org.schabi.newpipe.extractor.ServiceList.PeerTube;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Test for {@link PeertubeSearchExtractor}
|
|
||||||
*/
|
|
||||||
public class PeertubeSearchExtractorDefaultTest extends PeertubeSearchExtractorBaseTest {
|
|
||||||
|
|
||||||
@BeforeClass
|
|
||||||
public static void setUpClass() throws Exception {
|
|
||||||
NewPipe.init(DownloaderTestImpl.getInstance());
|
|
||||||
// setting instance might break test when running in parallel
|
|
||||||
PeerTube.setInstance(new PeertubeInstance("https://peertube.mastodon.host", "PeerTube on Mastodon.host"));
|
|
||||||
extractor = (PeertubeSearchExtractor) PeerTube.getSearchExtractor("kde");
|
|
||||||
extractor.fetchPage();
|
|
||||||
itemsPage = extractor.getInitialPage();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testGetSecondPageUrl() throws Exception {
|
|
||||||
assertEquals("https://peertube.mastodon.host/api/v1/search/videos?search=kde&start=12&count=12", extractor.getNextPageUrl());
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testResultList_FirstElement() {
|
|
||||||
InfoItem firstInfoItem = itemsPage.getItems().get(0);
|
|
||||||
|
|
||||||
assertTrue("search does not match", firstInfoItem.getName().toLowerCase().contains("kde"));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testResultListCheckIfContainsStreamItems() {
|
|
||||||
boolean hasStreams = false;
|
|
||||||
for (InfoItem item : itemsPage.getItems()) {
|
|
||||||
if (item instanceof StreamInfoItem) {
|
|
||||||
hasStreams = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assertTrue("Has no InfoItemStreams", hasStreams);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testGetSecondPage() throws Exception {
|
|
||||||
extractor = (PeertubeSearchExtractor) PeerTube.getSearchExtractor("internet");
|
|
||||||
itemsPage = extractor.getInitialPage();
|
|
||||||
PeertubeSearchExtractor secondExtractor =
|
|
||||||
(PeertubeSearchExtractor) PeerTube.getSearchExtractor("internet");
|
|
||||||
ListExtractor.InfoItemsPage<InfoItem> secondPage = secondExtractor.getPage(itemsPage.getNextPageUrl());
|
|
||||||
assertTrue(Integer.toString(secondPage.getItems().size()),
|
|
||||||
secondPage.getItems().size() >= 10);
|
|
||||||
|
|
||||||
// check if its the same result
|
|
||||||
boolean equals = true;
|
|
||||||
for (int i = 0; i < secondPage.getItems().size()
|
|
||||||
&& i < itemsPage.getItems().size(); i++) {
|
|
||||||
if (!secondPage.getItems().get(i).getUrl().equals(
|
|
||||||
itemsPage.getItems().get(i).getUrl())) {
|
|
||||||
equals = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assertFalse("First and second page are equal", equals);
|
|
||||||
|
|
||||||
assertEquals("https://peertube.mastodon.host/api/v1/search/videos?search=internet&start=24&count=12",
|
|
||||||
secondPage.getNextPageUrl());
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testId() throws Exception {
|
|
||||||
assertEquals("kde", extractor.getId());
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testName() {
|
|
||||||
assertEquals("kde", extractor.getName());
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -0,0 +1,59 @@
|
||||||
|
package org.schabi.newpipe.extractor.services.peertube.search;
|
||||||
|
|
||||||
|
import org.junit.BeforeClass;
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.schabi.newpipe.DownloaderTestImpl;
|
||||||
|
import org.schabi.newpipe.extractor.InfoItem;
|
||||||
|
import org.schabi.newpipe.extractor.ListExtractor.InfoItemsPage;
|
||||||
|
import org.schabi.newpipe.extractor.NewPipe;
|
||||||
|
import org.schabi.newpipe.extractor.StreamingService;
|
||||||
|
import org.schabi.newpipe.extractor.search.SearchExtractor;
|
||||||
|
import org.schabi.newpipe.extractor.services.DefaultSearchExtractorTest;
|
||||||
|
import org.schabi.newpipe.extractor.services.peertube.PeertubeInstance;
|
||||||
|
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
import static java.util.Collections.singletonList;
|
||||||
|
import static org.schabi.newpipe.extractor.ServiceList.PeerTube;
|
||||||
|
import static org.schabi.newpipe.extractor.services.DefaultTests.assertNoDuplicatedItems;
|
||||||
|
import static org.schabi.newpipe.extractor.services.peertube.linkHandler.PeertubeSearchQueryHandlerFactory.VIDEOS;
|
||||||
|
|
||||||
|
public class PeertubeSearchExtractorTest {
|
||||||
|
|
||||||
|
public static class All extends DefaultSearchExtractorTest {
|
||||||
|
private static SearchExtractor extractor;
|
||||||
|
private static final String QUERY = "kde";
|
||||||
|
|
||||||
|
@BeforeClass
|
||||||
|
public static void setUp() throws Exception {
|
||||||
|
NewPipe.init(DownloaderTestImpl.getInstance());
|
||||||
|
// setting instance might break test when running in parallel
|
||||||
|
PeerTube.setInstance(new PeertubeInstance("https://peertube.mastodon.host", "PeerTube on Mastodon.host"));
|
||||||
|
extractor = PeerTube.getSearchExtractor(QUERY);
|
||||||
|
extractor.fetchPage();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override public SearchExtractor extractor() { return extractor; }
|
||||||
|
@Override public StreamingService expectedService() { return PeerTube; }
|
||||||
|
@Override public String expectedName() { return QUERY; }
|
||||||
|
@Override public String expectedId() { return QUERY; }
|
||||||
|
@Override public String expectedUrlContains() { return "/search/videos?search=" + QUERY; }
|
||||||
|
@Override public String expectedOriginalUrlContains() { return "/search/videos?search=" + QUERY; }
|
||||||
|
@Override public String expectedSearchString() { return QUERY; }
|
||||||
|
@Nullable @Override public String expectedSearchSuggestion() { return null; }
|
||||||
|
}
|
||||||
|
|
||||||
|
public static class PagingTest {
|
||||||
|
@Test
|
||||||
|
public void duplicatedItemsCheck() throws Exception {
|
||||||
|
NewPipe.init(DownloaderTestImpl.getInstance());
|
||||||
|
final SearchExtractor extractor = PeerTube.getSearchExtractor("internet", singletonList(VIDEOS), "");
|
||||||
|
extractor.fetchPage();
|
||||||
|
|
||||||
|
final InfoItemsPage<InfoItem> page1 = extractor.getInitialPage();
|
||||||
|
final InfoItemsPage<InfoItem> page2 = extractor.getPage(page1.getNextPageUrl());
|
||||||
|
|
||||||
|
assertNoDuplicatedItems(PeerTube, page1, page2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -1,55 +0,0 @@
|
||||||
package org.schabi.newpipe.extractor.services.soundcloud.search;
|
|
||||||
|
|
||||||
import org.junit.Test;
|
|
||||||
import org.schabi.newpipe.extractor.InfoItem;
|
|
||||||
import org.schabi.newpipe.extractor.ListExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.services.soundcloud.SoundcloudSearchExtractor;
|
|
||||||
|
|
||||||
import static org.junit.Assert.assertTrue;
|
|
||||||
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Created by Christian Schabesberger on 17.06.18
|
|
||||||
*
|
|
||||||
* Copyright (C) Christian Schabesberger 2018 <chris.schabesberger@mailbox.org>
|
|
||||||
* SoundcloudSearchExtractorBaseTest.java is part of NewPipe.
|
|
||||||
*
|
|
||||||
* NewPipe is free software: you can redistribute it and/or modify
|
|
||||||
* it under the terms of the GNU General Public License as published by
|
|
||||||
* the Free Software Foundation, either version 3 of the License, or
|
|
||||||
* (at your option) any later version.
|
|
||||||
*
|
|
||||||
* NewPipe is distributed in the hope that it will be useful,
|
|
||||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
* GNU General Public License for more details.
|
|
||||||
*
|
|
||||||
* You should have received a copy of the GNU General Public License
|
|
||||||
* along with NewPipe. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Test for {@link SoundcloudSearchExtractor}
|
|
||||||
*/
|
|
||||||
public abstract class SoundcloudSearchExtractorBaseTest {
|
|
||||||
|
|
||||||
protected static SoundcloudSearchExtractor extractor;
|
|
||||||
protected static ListExtractor.InfoItemsPage<InfoItem> itemsPage;
|
|
||||||
|
|
||||||
|
|
||||||
protected static String removeClientId(String url) {
|
|
||||||
String[] splitUrl = url.split("client_id=[a-zA-Z0-9]*&");
|
|
||||||
return splitUrl[0] + splitUrl[1];
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testResultListElementsLength() {
|
|
||||||
assertTrue(Integer.toString(itemsPage.getItems().size()),
|
|
||||||
itemsPage.getItems().size() >= 3);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testUrl() throws Exception {
|
|
||||||
assertTrue(extractor.getUrl(), extractor.getUrl().startsWith("https://api-v2.soundcloud.com/search"));
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,66 +0,0 @@
|
||||||
package org.schabi.newpipe.extractor.services.soundcloud.search;
|
|
||||||
|
|
||||||
import org.junit.BeforeClass;
|
|
||||||
import org.junit.Test;
|
|
||||||
import org.schabi.newpipe.DownloaderTestImpl;
|
|
||||||
import org.schabi.newpipe.extractor.InfoItem;
|
|
||||||
import org.schabi.newpipe.extractor.ListExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.NewPipe;
|
|
||||||
import org.schabi.newpipe.extractor.channel.ChannelInfoItem;
|
|
||||||
import org.schabi.newpipe.extractor.localization.Localization;
|
|
||||||
import org.schabi.newpipe.extractor.services.soundcloud.SoundcloudSearchExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.services.soundcloud.SoundcloudSearchQueryHandlerFactory;
|
|
||||||
|
|
||||||
import static java.util.Arrays.asList;
|
|
||||||
import static org.junit.Assert.*;
|
|
||||||
import static org.schabi.newpipe.extractor.ServiceList.SoundCloud;
|
|
||||||
|
|
||||||
public class SoundcloudSearchExtractorChannelOnlyTest extends SoundcloudSearchExtractorBaseTest {
|
|
||||||
|
|
||||||
@BeforeClass
|
|
||||||
public static void setUpClass() throws Exception {
|
|
||||||
NewPipe.init(DownloaderTestImpl.getInstance(), new Localization("de", "DE"));
|
|
||||||
extractor = (SoundcloudSearchExtractor) SoundCloud.getSearchExtractor("lill uzi vert",
|
|
||||||
asList(SoundcloudSearchQueryHandlerFactory.USERS), null);
|
|
||||||
extractor.fetchPage();
|
|
||||||
itemsPage = extractor.getInitialPage();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testGetSecondPage() throws Exception {
|
|
||||||
SoundcloudSearchExtractor secondExtractor = (SoundcloudSearchExtractor) SoundCloud.getSearchExtractor("lill uzi vert",
|
|
||||||
asList(SoundcloudSearchQueryHandlerFactory.USERS), null);
|
|
||||||
ListExtractor.InfoItemsPage<InfoItem> secondPage = secondExtractor.getPage(itemsPage.getNextPageUrl());
|
|
||||||
assertTrue(Integer.toString(secondPage.getItems().size()),
|
|
||||||
secondPage.getItems().size() >= 3);
|
|
||||||
|
|
||||||
// check if its the same result
|
|
||||||
boolean equals = true;
|
|
||||||
for (int i = 0; i < secondPage.getItems().size()
|
|
||||||
&& i < itemsPage.getItems().size(); i++) {
|
|
||||||
if (!secondPage.getItems().get(i).getUrl().equals(
|
|
||||||
itemsPage.getItems().get(i).getUrl())) {
|
|
||||||
equals = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assertFalse("First and second page are equal", equals);
|
|
||||||
|
|
||||||
assertEquals("https://api-v2.soundcloud.com/search/users?q=lill+uzi+vert&limit=10&offset=20",
|
|
||||||
removeClientId(secondPage.getNextPageUrl()));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testGetSecondPageUrl() throws Exception {
|
|
||||||
assertEquals("https://api-v2.soundcloud.com/search/users?q=lill+uzi+vert&limit=10&offset=10",
|
|
||||||
removeClientId(extractor.getNextPageUrl()));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testOnlyContainChannels() {
|
|
||||||
for (InfoItem item : itemsPage.getItems()) {
|
|
||||||
if (!(item instanceof ChannelInfoItem)) {
|
|
||||||
fail("The following item is no channel item: " + item.toString());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,104 +0,0 @@
|
||||||
package org.schabi.newpipe.extractor.services.soundcloud.search;
|
|
||||||
|
|
||||||
import org.junit.BeforeClass;
|
|
||||||
import org.junit.Test;
|
|
||||||
import org.schabi.newpipe.DownloaderTestImpl;
|
|
||||||
import org.schabi.newpipe.extractor.InfoItem;
|
|
||||||
import org.schabi.newpipe.extractor.ListExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.NewPipe;
|
|
||||||
import org.schabi.newpipe.extractor.services.soundcloud.SoundcloudSearchExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.services.soundcloud.SoundcloudSearchQueryHandlerFactory;
|
|
||||||
import org.schabi.newpipe.extractor.services.youtube.extractors.YoutubeSearchExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
|
|
||||||
|
|
||||||
import java.util.Arrays;
|
|
||||||
|
|
||||||
import static org.junit.Assert.*;
|
|
||||||
import static org.schabi.newpipe.extractor.ServiceList.SoundCloud;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Created by Christian Schabesberger on 27.05.18
|
|
||||||
*
|
|
||||||
* Copyright (C) Christian Schabesberger 2018 <chris.schabesberger@mailbox.org>
|
|
||||||
* SoundcloudSearchExtractorDefaultTest.java is part of NewPipe.
|
|
||||||
*
|
|
||||||
* NewPipe is free software: you can redistribute it and/or modify
|
|
||||||
* it under the terms of the GNU General Public License as published by
|
|
||||||
* the Free Software Foundation, either version 3 of the License, or
|
|
||||||
* (at your option) any later version.
|
|
||||||
*
|
|
||||||
* NewPipe is distributed in the hope that it will be useful,
|
|
||||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
* GNU General Public License for more details.
|
|
||||||
*
|
|
||||||
* You should have received a copy of the GNU General Public License
|
|
||||||
* along with NewPipe. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
 * Test for {@link SoundcloudSearchExtractor}
 */
|
|
||||||
public class SoundcloudSearchExtractorDefaultTest extends SoundcloudSearchExtractorBaseTest {
|
|
||||||
|
|
||||||
@BeforeClass
|
|
||||||
public static void setUpClass() throws Exception {
|
|
||||||
NewPipe.init(DownloaderTestImpl.getInstance());
|
|
||||||
extractor = (SoundcloudSearchExtractor) SoundCloud.getSearchExtractor(
|
|
||||||
new SoundcloudSearchQueryHandlerFactory().fromQuery("lill uzi vert",
|
|
||||||
Arrays.asList(new String[]{"tracks"}), ""));
|
|
||||||
extractor.fetchPage();
|
|
||||||
itemsPage = extractor.getInitialPage();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testGetSecondPageUrl() throws Exception {
|
|
||||||
assertEquals("https://api-v2.soundcloud.com/search/tracks?q=lill+uzi+vert&limit=10&offset=10",
|
|
||||||
removeClientId(extractor.getNextPageUrl()));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testResultListCheckIfContainsStreamItems() {
|
|
||||||
boolean hasStreams = false;
|
|
||||||
for (InfoItem item : itemsPage.getItems()) {
|
|
||||||
if (item instanceof StreamInfoItem) {
|
|
||||||
hasStreams = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assertTrue("Has no InfoItemStreams", hasStreams);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testGetSecondPage() throws Exception {
|
|
||||||
SoundcloudSearchExtractor secondExtractor =
|
|
||||||
(SoundcloudSearchExtractor) SoundCloud.getSearchExtractor("lill uzi vert");
|
|
||||||
ListExtractor.InfoItemsPage<InfoItem> secondPage = secondExtractor.getPage(itemsPage.getNextPageUrl());
|
|
||||||
assertTrue(Integer.toString(secondPage.getItems().size()),
|
|
||||||
secondPage.getItems().size() >= 10);
|
|
||||||
|
|
||||||
// check if its the same result
|
|
||||||
boolean equals = true;
|
|
||||||
for (int i = 0; i < secondPage.getItems().size()
|
|
||||||
&& i < itemsPage.getItems().size(); i++) {
|
|
||||||
if (!secondPage.getItems().get(i).getUrl().equals(
|
|
||||||
itemsPage.getItems().get(i).getUrl())) {
|
|
||||||
equals = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assertFalse("First and second page are equal", equals);
|
|
||||||
|
|
||||||
assertEquals("https://api-v2.soundcloud.com/search/tracks?q=lill+uzi+vert&limit=10&offset=20",
|
|
||||||
removeClientId(secondPage.getNextPageUrl()));
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testId() throws Exception {
|
|
||||||
assertEquals("lill uzi vert", extractor.getId());
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testName() {
|
|
||||||
assertEquals("lill uzi vert", extractor.getName());
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -0,0 +1,135 @@
|
||||||
|
package org.schabi.newpipe.extractor.services.soundcloud.search;
|
||||||
|
|
||||||
|
import org.junit.BeforeClass;
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.schabi.newpipe.DownloaderTestImpl;
|
||||||
|
import org.schabi.newpipe.extractor.InfoItem;
|
||||||
|
import org.schabi.newpipe.extractor.ListExtractor.InfoItemsPage;
|
||||||
|
import org.schabi.newpipe.extractor.NewPipe;
|
||||||
|
import org.schabi.newpipe.extractor.StreamingService;
|
||||||
|
import org.schabi.newpipe.extractor.search.SearchExtractor;
|
||||||
|
import org.schabi.newpipe.extractor.services.DefaultSearchExtractorTest;
|
||||||
|
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
import java.io.UnsupportedEncodingException;
|
||||||
|
import java.net.URLEncoder;
|
||||||
|
|
||||||
|
import static java.util.Collections.singletonList;
|
||||||
|
import static org.schabi.newpipe.extractor.ServiceList.SoundCloud;
|
||||||
|
import static org.schabi.newpipe.extractor.services.DefaultTests.assertNoDuplicatedItems;
|
||||||
|
import static org.schabi.newpipe.extractor.services.soundcloud.SoundcloudSearchQueryHandlerFactory.*;
|
||||||
|
|
||||||
|
public class SoundcloudSearchExtractorTest {
|
||||||
|
|
||||||
|
public static class All extends DefaultSearchExtractorTest {
|
||||||
|
private static SearchExtractor extractor;
|
||||||
|
private static final String QUERY = "lill uzi vert";
|
||||||
|
|
||||||
|
@BeforeClass
|
||||||
|
public static void setUp() throws Exception {
|
||||||
|
NewPipe.init(DownloaderTestImpl.getInstance());
|
||||||
|
extractor = SoundCloud.getSearchExtractor(QUERY);
|
||||||
|
extractor.fetchPage();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override public SearchExtractor extractor() { return extractor; }
|
||||||
|
@Override public StreamingService expectedService() { return SoundCloud; }
|
||||||
|
@Override public String expectedName() { return QUERY; }
|
||||||
|
@Override public String expectedId() { return QUERY; }
|
||||||
|
@Override public String expectedUrlContains() { return "soundcloud.com/search?q=" + urlEncode(QUERY); }
|
||||||
|
@Override public String expectedOriginalUrlContains() { return "soundcloud.com/search?q=" + urlEncode(QUERY); }
|
||||||
|
@Override public String expectedSearchString() { return QUERY; }
|
||||||
|
@Nullable @Override public String expectedSearchSuggestion() { return null; }
|
||||||
|
}
|
||||||
|
|
||||||
|
public static class Tracks extends DefaultSearchExtractorTest {
|
||||||
|
private static SearchExtractor extractor;
|
||||||
|
private static final String QUERY = "lill uzi vert";
|
||||||
|
|
||||||
|
@BeforeClass
|
||||||
|
public static void setUp() throws Exception {
|
||||||
|
NewPipe.init(DownloaderTestImpl.getInstance());
|
||||||
|
extractor = SoundCloud.getSearchExtractor(QUERY, singletonList(TRACKS), "");
|
||||||
|
extractor.fetchPage();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override public SearchExtractor extractor() { return extractor; }
|
||||||
|
@Override public StreamingService expectedService() { return SoundCloud; }
|
||||||
|
@Override public String expectedName() { return QUERY; }
|
||||||
|
@Override public String expectedId() { return QUERY; }
|
||||||
|
@Override public String expectedUrlContains() { return "soundcloud.com/search/tracks?q=" + urlEncode(QUERY); }
|
||||||
|
@Override public String expectedOriginalUrlContains() { return "soundcloud.com/search/tracks?q=" + urlEncode(QUERY); }
|
||||||
|
@Override public String expectedSearchString() { return QUERY; }
|
||||||
|
@Nullable @Override public String expectedSearchSuggestion() { return null; }
|
||||||
|
|
||||||
|
@Override public InfoItem.InfoType expectedInfoItemType() { return InfoItem.InfoType.STREAM; }
|
||||||
|
}
|
||||||
|
|
||||||
|
public static class Users extends DefaultSearchExtractorTest {
|
||||||
|
private static SearchExtractor extractor;
|
||||||
|
private static final String QUERY = "lill uzi vert";
|
||||||
|
|
||||||
|
@BeforeClass
|
||||||
|
public static void setUp() throws Exception {
|
||||||
|
NewPipe.init(DownloaderTestImpl.getInstance());
|
||||||
|
extractor = SoundCloud.getSearchExtractor(QUERY, singletonList(USERS), "");
|
||||||
|
extractor.fetchPage();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override public SearchExtractor extractor() { return extractor; }
|
||||||
|
@Override public StreamingService expectedService() { return SoundCloud; }
|
||||||
|
@Override public String expectedName() { return QUERY; }
|
||||||
|
@Override public String expectedId() { return QUERY; }
|
||||||
|
@Override public String expectedUrlContains() { return "soundcloud.com/search/users?q=" + urlEncode(QUERY); }
|
||||||
|
@Override public String expectedOriginalUrlContains() { return "soundcloud.com/search/users?q=" + urlEncode(QUERY); }
|
||||||
|
@Override public String expectedSearchString() { return QUERY; }
|
||||||
|
@Nullable @Override public String expectedSearchSuggestion() { return null; }
|
||||||
|
|
||||||
|
@Override public InfoItem.InfoType expectedInfoItemType() { return InfoItem.InfoType.CHANNEL; }
|
||||||
|
}
|
||||||
|
|
||||||
|
public static class Playlists extends DefaultSearchExtractorTest {
|
||||||
|
private static SearchExtractor extractor;
|
||||||
|
private static final String QUERY = "lill uzi vert";
|
||||||
|
|
||||||
|
@BeforeClass
|
||||||
|
public static void setUp() throws Exception {
|
||||||
|
NewPipe.init(DownloaderTestImpl.getInstance());
|
||||||
|
extractor = SoundCloud.getSearchExtractor(QUERY, singletonList(PLAYLISTS), "");
|
||||||
|
extractor.fetchPage();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override public SearchExtractor extractor() { return extractor; }
|
||||||
|
@Override public StreamingService expectedService() { return SoundCloud; }
|
||||||
|
@Override public String expectedName() { return QUERY; }
|
||||||
|
@Override public String expectedId() { return QUERY; }
|
||||||
|
@Override public String expectedUrlContains() { return "soundcloud.com/search/playlists?q=" + urlEncode(QUERY); }
|
||||||
|
@Override public String expectedOriginalUrlContains() { return "soundcloud.com/search/playlists?q=" + urlEncode(QUERY); }
|
||||||
|
@Override public String expectedSearchString() { return QUERY; }
|
||||||
|
@Nullable @Override public String expectedSearchSuggestion() { return null; }
|
||||||
|
|
||||||
|
@Override public InfoItem.InfoType expectedInfoItemType() { return InfoItem.InfoType.PLAYLIST; }
|
||||||
|
}
|
||||||
|
|
||||||
|
public static class PagingTest {
|
||||||
|
@Test
|
||||||
|
public void duplicatedItemsCheck() throws Exception {
|
||||||
|
NewPipe.init(DownloaderTestImpl.getInstance());
|
||||||
|
final SearchExtractor extractor = SoundCloud.getSearchExtractor("cirque du soleil", singletonList(TRACKS), "");
|
||||||
|
extractor.fetchPage();
|
||||||
|
|
||||||
|
final InfoItemsPage<InfoItem> page1 = extractor.getInitialPage();
|
||||||
|
final InfoItemsPage<InfoItem> page2 = extractor.getPage(page1.getNextPageUrl());
|
||||||
|
|
||||||
|
assertNoDuplicatedItems(SoundCloud, page1, page2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static String urlEncode(String value) {
|
||||||
|
try {
|
||||||
|
return URLEncoder.encode(value, CHARSET_UTF_8);
|
||||||
|
} catch (UnsupportedEncodingException e) {
|
||||||
|
throw new RuntimeException(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -1,50 +0,0 @@
|
||||||
package org.schabi.newpipe.extractor.services.youtube.search;
|
|
||||||
|
|
||||||
import org.junit.Test;
|
|
||||||
import org.schabi.newpipe.extractor.InfoItem;
|
|
||||||
import org.schabi.newpipe.extractor.ListExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.services.youtube.extractors.YoutubeSearchExtractor;
|
|
||||||
|
|
||||||
import static org.junit.Assert.assertTrue;
|
|
||||||
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Created by Christian Schabesberger on 27.05.18
|
|
||||||
*
|
|
||||||
* Copyright (C) Christian Schabesberger 2018 <chris.schabesberger@mailbox.org>
|
|
||||||
* YoutubeSearchExtractorBaseTest.java is part of NewPipe.
|
|
||||||
*
|
|
||||||
* NewPipe is free software: you can redistribute it and/or modify
|
|
||||||
* it under the terms of the GNU General Public License as published by
|
|
||||||
* the Free Software Foundation, either version 3 of the License, or
|
|
||||||
* (at your option) any later version.
|
|
||||||
*
|
|
||||||
* NewPipe is distributed in the hope that it will be useful,
|
|
||||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
* GNU General Public License for more details.
|
|
||||||
*
|
|
||||||
* You should have received a copy of the GNU General Public License
|
|
||||||
* along with NewPipe. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Test for {@link YoutubeSearchExtractor}
|
|
||||||
*/
|
|
||||||
public abstract class YoutubeSearchExtractorBaseTest {
|
|
||||||
|
|
||||||
protected static YoutubeSearchExtractor extractor;
|
|
||||||
protected static ListExtractor.InfoItemsPage<InfoItem> itemsPage;
|
|
||||||
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testResultListElementsLength() {
|
|
||||||
assertTrue(Integer.toString(itemsPage.getItems().size()),
|
|
||||||
itemsPage.getItems().size() > 10);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testUrl() throws Exception {
|
|
||||||
assertTrue(extractor.getUrl(), extractor.getUrl().startsWith("https://www.youtube.com"));
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,115 +0,0 @@
|
||||||
package org.schabi.newpipe.extractor.services.youtube.search;
|
|
||||||
|
|
||||||
import org.hamcrest.CoreMatchers;
|
|
||||||
import org.junit.BeforeClass;
|
|
||||||
import org.junit.Ignore;
|
|
||||||
import org.junit.Test;
|
|
||||||
import org.schabi.newpipe.DownloaderTestImpl;
|
|
||||||
import org.schabi.newpipe.extractor.InfoItem;
|
|
||||||
import org.schabi.newpipe.extractor.ListExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.NewPipe;
|
|
||||||
import org.schabi.newpipe.extractor.channel.ChannelInfoItem;
|
|
||||||
import org.schabi.newpipe.extractor.services.youtube.extractors.YoutubeSearchExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeSearchQueryHandlerFactory;
|
|
||||||
|
|
||||||
import java.net.URL;
|
|
||||||
import java.net.URLDecoder;
|
|
||||||
import java.util.LinkedHashMap;
|
|
||||||
import java.util.Map;
|
|
||||||
|
|
||||||
import static java.util.Arrays.asList;
|
|
||||||
import static org.junit.Assert.assertEquals;
|
|
||||||
import static org.junit.Assert.assertFalse;
|
|
||||||
import static org.junit.Assert.assertThat;
|
|
||||||
import static org.junit.Assert.assertTrue;
|
|
||||||
import static org.junit.Assert.fail;
|
|
||||||
import static org.schabi.newpipe.extractor.ServiceList.YouTube;
|
|
||||||
|
|
||||||
public class YoutubeSearchExtractorChannelOnlyTest extends YoutubeSearchExtractorBaseTest {
|
|
||||||
|
|
||||||
@BeforeClass
|
|
||||||
public static void setUpClass() throws Exception {
|
|
||||||
NewPipe.init(DownloaderTestImpl.getInstance());
|
|
||||||
extractor = (YoutubeSearchExtractor) YouTube.getSearchExtractor("pewdiepie",
|
|
||||||
asList(YoutubeSearchQueryHandlerFactory.CHANNELS), null);
|
|
||||||
extractor.fetchPage();
|
|
||||||
itemsPage = extractor.getInitialPage();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testGetSecondPage() throws Exception {
|
|
||||||
YoutubeSearchExtractor secondExtractor = (YoutubeSearchExtractor) YouTube.getSearchExtractor("pewdiepie",
|
|
||||||
asList(YoutubeSearchQueryHandlerFactory.CHANNELS), null);
|
|
||||||
ListExtractor.InfoItemsPage<InfoItem> secondPage = secondExtractor.getPage(itemsPage.getNextPageUrl());
|
|
||||||
assertTrue(Integer.toString(secondPage.getItems().size()),
|
|
||||||
secondPage.getItems().size() > 10);
|
|
||||||
|
|
||||||
// check if its the same result
|
|
||||||
boolean equals = true;
|
|
||||||
for (int i = 0; i < secondPage.getItems().size()
|
|
||||||
&& i < itemsPage.getItems().size(); i++) {
|
|
||||||
if (!secondPage.getItems().get(i).getUrl().equals(
|
|
||||||
itemsPage.getItems().get(i).getUrl())) {
|
|
||||||
equals = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assertFalse("First and second page are equal", equals);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testGetSecondPageUrl() throws Exception {
|
|
||||||
URL url = new URL(extractor.getNextPageUrl());
|
|
||||||
|
|
||||||
assertEquals(url.getHost(), "www.youtube.com");
|
|
||||||
assertEquals(url.getPath(), "/results");
|
|
||||||
|
|
||||||
Map<String, String> queryPairs = new LinkedHashMap<>();
|
|
||||||
for (String queryPair : url.getQuery().split("&")) {
|
|
||||||
int index = queryPair.indexOf("=");
|
|
||||||
queryPairs.put(URLDecoder.decode(queryPair.substring(0, index), "UTF-8"),
|
|
||||||
URLDecoder.decode(queryPair.substring(index + 1), "UTF-8"));
|
|
||||||
}
|
|
||||||
|
|
||||||
assertEquals("pewdiepie", queryPairs.get("search_query"));
|
|
||||||
assertEquals(queryPairs.get("ctoken"), queryPairs.get("continuation"));
|
|
||||||
assertTrue(queryPairs.get("continuation").length() > 5);
|
|
||||||
assertTrue(queryPairs.get("itct").length() > 5);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Ignore
|
|
||||||
@Test
|
|
||||||
public void testOnlyContainChannels() {
|
|
||||||
for (InfoItem item : itemsPage.getItems()) {
|
|
||||||
if (!(item instanceof ChannelInfoItem)) {
|
|
||||||
fail("The following item is no channel item: " + item.toString());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testChannelUrl() {
|
|
||||||
for (InfoItem item : itemsPage.getItems()) {
|
|
||||||
if (item instanceof ChannelInfoItem) {
|
|
||||||
ChannelInfoItem channel = (ChannelInfoItem) item;
|
|
||||||
|
|
||||||
if (channel.getSubscriberCount() > 1e8) { // the real PewDiePie
|
|
||||||
assertEquals("https://www.youtube.com/channel/UC-lHJZR3Gqxm24_Vd_AJ5Yw", item.getUrl());
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for (InfoItem item : itemsPage.getItems()) {
|
|
||||||
if (item instanceof ChannelInfoItem) {
|
|
||||||
assertThat(item.getUrl(), CoreMatchers.startsWith("https://www.youtube.com/channel/"));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testStreamCount() {
|
|
||||||
ChannelInfoItem ci = (ChannelInfoItem) itemsPage.getItems().get(0);
|
|
||||||
assertTrue("Stream count does not fit: " + ci.getStreamCount(),
|
|
||||||
4000 < ci.getStreamCount() && ci.getStreamCount() < 5500);
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,142 +0,0 @@
|
||||||
package org.schabi.newpipe.extractor.services.youtube.search;
|
|
||||||
|
|
||||||
import org.junit.BeforeClass;
|
|
||||||
import org.junit.Test;
|
|
||||||
import org.schabi.newpipe.DownloaderTestImpl;
|
|
||||||
import org.schabi.newpipe.extractor.InfoItem;
|
|
||||||
import org.schabi.newpipe.extractor.ListExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.NewPipe;
|
|
||||||
import org.schabi.newpipe.extractor.channel.ChannelInfoItem;
|
|
||||||
import org.schabi.newpipe.extractor.services.youtube.extractors.YoutubeSearchExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
|
|
||||||
|
|
||||||
import java.net.URL;
|
|
||||||
import java.net.URLDecoder;
|
|
||||||
import java.util.LinkedHashMap;
|
|
||||||
import java.util.Map;
|
|
||||||
|
|
||||||
import static org.junit.Assert.*;
|
|
||||||
import static org.schabi.newpipe.extractor.ServiceList.YouTube;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Created by Christian Schabesberger on 27.05.18
|
|
||||||
*
|
|
||||||
* Copyright (C) Christian Schabesberger 2018 <chris.schabesberger@mailbox.org>
|
|
||||||
* YoutubeSearchExtractorDefaultTest.java is part of NewPipe.
|
|
||||||
*
|
|
||||||
* NewPipe is free software: you can redistribute it and/or modify
|
|
||||||
* it under the terms of the GNU General Public License as published by
|
|
||||||
* the Free Software Foundation, either version 3 of the License, or
|
|
||||||
* (at your option) any later version.
|
|
||||||
*
|
|
||||||
* NewPipe is distributed in the hope that it will be useful,
|
|
||||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
* GNU General Public License for more details.
|
|
||||||
*
|
|
||||||
* You should have received a copy of the GNU General Public License
|
|
||||||
* along with NewPipe. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Test for {@link YoutubeSearchExtractor}
|
|
||||||
*/
|
|
||||||
public class YoutubeSearchExtractorDefaultTest extends YoutubeSearchExtractorBaseTest {
|
|
||||||
|
|
||||||
@BeforeClass
|
|
||||||
public static void setUpClass() throws Exception {
|
|
||||||
NewPipe.init(DownloaderTestImpl.getInstance());
|
|
||||||
extractor = (YoutubeSearchExtractor) YouTube.getSearchExtractor("pewdiepie");
|
|
||||||
extractor.fetchPage();
|
|
||||||
itemsPage = extractor.getInitialPage();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testGetUrl() throws Exception {
|
|
||||||
assertEquals("https://www.youtube.com/results?search_query=pewdiepie&gl=GB", extractor.getUrl());
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testGetSecondPageUrl() throws Exception {
|
|
||||||
URL url = new URL(extractor.getNextPageUrl());
|
|
||||||
|
|
||||||
assertEquals(url.getHost(), "www.youtube.com");
|
|
||||||
assertEquals(url.getPath(), "/results");
|
|
||||||
|
|
||||||
Map<String, String> queryPairs = new LinkedHashMap<>();
|
|
||||||
for (String queryPair : url.getQuery().split("&")) {
|
|
||||||
int index = queryPair.indexOf("=");
|
|
||||||
queryPairs.put(URLDecoder.decode(queryPair.substring(0, index), "UTF-8"),
|
|
||||||
URLDecoder.decode(queryPair.substring(index + 1), "UTF-8"));
|
|
||||||
}
|
|
||||||
|
|
||||||
assertEquals("pewdiepie", queryPairs.get("search_query"));
|
|
||||||
assertEquals(queryPairs.get("ctoken"), queryPairs.get("continuation"));
|
|
||||||
assertTrue(queryPairs.get("continuation").length() > 5);
|
|
||||||
assertTrue(queryPairs.get("itct").length() > 5);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testResultList_FirstElement() {
|
|
||||||
InfoItem firstInfoItem = itemsPage.getItems().get(0);
|
|
||||||
InfoItem secondInfoItem = itemsPage.getItems().get(1);
|
|
||||||
|
|
||||||
InfoItem channelItem = firstInfoItem instanceof ChannelInfoItem ? firstInfoItem
|
|
||||||
: secondInfoItem;
|
|
||||||
|
|
||||||
// The channel should be the first item
|
|
||||||
assertTrue((firstInfoItem instanceof ChannelInfoItem)
|
|
||||||
|| (secondInfoItem instanceof ChannelInfoItem));
|
|
||||||
assertEquals("name", "PewDiePie", channelItem.getName());
|
|
||||||
assertEquals("url", "https://www.youtube.com/channel/UC-lHJZR3Gqxm24_Vd_AJ5Yw", channelItem.getUrl());
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testResultListCheckIfContainsStreamItems() {
|
|
||||||
boolean hasStreams = false;
|
|
||||||
for (InfoItem item : itemsPage.getItems()) {
|
|
||||||
if (item instanceof StreamInfoItem) {
|
|
||||||
hasStreams = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assertTrue("Has no InfoItemStreams", hasStreams);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testGetSecondPage() throws Exception {
|
|
||||||
YoutubeSearchExtractor secondExtractor =
|
|
||||||
(YoutubeSearchExtractor) YouTube.getSearchExtractor("pewdiepie");
|
|
||||||
ListExtractor.InfoItemsPage<InfoItem> secondPage = secondExtractor.getPage(itemsPage.getNextPageUrl());
|
|
||||||
assertTrue(Integer.toString(secondPage.getItems().size()),
|
|
||||||
secondPage.getItems().size() > 10);
|
|
||||||
|
|
||||||
// check if its the same result
|
|
||||||
boolean equals = true;
|
|
||||||
for (int i = 0; i < secondPage.getItems().size()
|
|
||||||
&& i < itemsPage.getItems().size(); i++) {
|
|
||||||
if (!secondPage.getItems().get(i).getUrl().equals(
|
|
||||||
itemsPage.getItems().get(i).getUrl())) {
|
|
||||||
equals = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assertFalse("First and second page are equal", equals);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testSuggestionNotNull() throws Exception {
|
|
||||||
//todo write a real test
|
|
||||||
assertNotNull(extractor.getSearchSuggestion());
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testId() throws Exception {
|
|
||||||
assertEquals("pewdiepie", extractor.getId());
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testName() {
|
|
||||||
assertEquals("pewdiepie", extractor.getName());
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -0,0 +1,190 @@
|
||||||
|
package org.schabi.newpipe.extractor.services.youtube.search;
|
||||||
|
|
||||||
|
import org.junit.BeforeClass;
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.schabi.newpipe.DownloaderTestImpl;
|
||||||
|
import org.schabi.newpipe.extractor.InfoItem;
|
||||||
|
import org.schabi.newpipe.extractor.ListExtractor;
|
||||||
|
import org.schabi.newpipe.extractor.NewPipe;
|
||||||
|
import org.schabi.newpipe.extractor.StreamingService;
|
||||||
|
import org.schabi.newpipe.extractor.search.SearchExtractor;
|
||||||
|
import org.schabi.newpipe.extractor.services.DefaultSearchExtractorTest;
|
||||||
|
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
|
||||||
|
import static java.util.Collections.singletonList;
|
||||||
|
import static junit.framework.TestCase.assertFalse;
|
||||||
|
import static org.junit.Assert.assertEquals;
|
||||||
|
import static org.junit.Assert.assertTrue;
|
||||||
|
import static org.schabi.newpipe.extractor.ExtractorAsserts.assertEmptyErrors;
|
||||||
|
import static org.schabi.newpipe.extractor.ServiceList.YouTube;
|
||||||
|
import static org.schabi.newpipe.extractor.services.DefaultTests.assertNoDuplicatedItems;
|
||||||
|
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeSearchQueryHandlerFactory.*;
|
||||||
|
|
||||||
|
/**
 * Test suite for the YouTube search extractor.
 *
 * <p>Each nested class runs the shared checks inherited from
 * {@code DefaultSearchExtractorTest} against one search configuration:
 * unfiltered results, channels only, playlists only, videos only, a
 * misspelled query that yields a suggestion, and a query with no further
 * pages. NOTE(review): these tests hit the live YouTube service through
 * {@code DownloaderTestImpl}, so failures may reflect upstream changes.
 */
public class YoutubeSearchExtractorTest {
    /** Unfiltered search: no content filter, default sort. */
    public static class All extends DefaultSearchExtractorTest {
        private static SearchExtractor extractor;
        private static final String QUERY = "test";

        @BeforeClass
        public static void setUp() throws Exception {
            NewPipe.init(DownloaderTestImpl.getInstance());
            extractor = YouTube.getSearchExtractor(QUERY);
            extractor.fetchPage();
        }

        // Expected values consumed by the base-class assertions.
        @Override public SearchExtractor extractor() { return extractor; }
        @Override public StreamingService expectedService() { return YouTube; }
        @Override public String expectedName() { return QUERY; }
        @Override public String expectedId() { return QUERY; }
        @Override public String expectedUrlContains() { return "youtube.com/results?search_query=" + QUERY; }
        @Override public String expectedOriginalUrlContains() { return "youtube.com/results?search_query=" + QUERY; }
        @Override public String expectedSearchString() { return QUERY; }
        @Nullable @Override public String expectedSearchSuggestion() { return null; }
    }

    /** Channel-filtered search: every returned item should be a channel. */
    public static class Channel extends DefaultSearchExtractorTest {
        private static SearchExtractor extractor;
        private static final String QUERY = "test";

        @BeforeClass
        public static void setUp() throws Exception {
            NewPipe.init(DownloaderTestImpl.getInstance());
            extractor = YouTube.getSearchExtractor(QUERY, singletonList(CHANNELS), "");
            extractor.fetchPage();
        }

        // Expected values consumed by the base-class assertions.
        @Override public SearchExtractor extractor() { return extractor; }
        @Override public StreamingService expectedService() { return YouTube; }
        @Override public String expectedName() { return QUERY; }
        @Override public String expectedId() { return QUERY; }
        @Override public String expectedUrlContains() { return "youtube.com/results?search_query=" + QUERY; }
        @Override public String expectedOriginalUrlContains() { return "youtube.com/results?search_query=" + QUERY; }
        @Override public String expectedSearchString() { return QUERY; }
        @Nullable @Override public String expectedSearchSuggestion() { return null; }

        @Override public InfoItem.InfoType expectedInfoItemType() { return InfoItem.InfoType.CHANNEL; }
    }

    /** Playlist-filtered search: every returned item should be a playlist. */
    public static class Playlists extends DefaultSearchExtractorTest {
        private static SearchExtractor extractor;
        private static final String QUERY = "test";

        @BeforeClass
        public static void setUp() throws Exception {
            NewPipe.init(DownloaderTestImpl.getInstance());
            extractor = YouTube.getSearchExtractor(QUERY, singletonList(PLAYLISTS), "");
            extractor.fetchPage();
        }

        // Expected values consumed by the base-class assertions.
        @Override public SearchExtractor extractor() { return extractor; }
        @Override public StreamingService expectedService() { return YouTube; }
        @Override public String expectedName() { return QUERY; }
        @Override public String expectedId() { return QUERY; }
        @Override public String expectedUrlContains() { return "youtube.com/results?search_query=" + QUERY; }
        @Override public String expectedOriginalUrlContains() { return "youtube.com/results?search_query=" + QUERY; }
        @Override public String expectedSearchString() { return QUERY; }
        @Nullable @Override public String expectedSearchSuggestion() { return null; }

        @Override public InfoItem.InfoType expectedInfoItemType() { return InfoItem.InfoType.PLAYLIST; }
    }

    /** Video-filtered search: every returned item should be a stream. */
    public static class Videos extends DefaultSearchExtractorTest {
        private static SearchExtractor extractor;
        private static final String QUERY = "test";

        @BeforeClass
        public static void setUp() throws Exception {
            NewPipe.init(DownloaderTestImpl.getInstance());
            extractor = YouTube.getSearchExtractor(QUERY, singletonList(VIDEOS), "");
            extractor.fetchPage();
        }

        // Expected values consumed by the base-class assertions.
        @Override public SearchExtractor extractor() { return extractor; }
        @Override public StreamingService expectedService() { return YouTube; }
        @Override public String expectedName() { return QUERY; }
        @Override public String expectedId() { return QUERY; }
        @Override public String expectedUrlContains() { return "youtube.com/results?search_query=" + QUERY; }
        @Override public String expectedOriginalUrlContains() { return "youtube.com/results?search_query=" + QUERY; }
        @Override public String expectedSearchString() { return QUERY; }
        @Nullable @Override public String expectedSearchSuggestion() { return null; }

        @Override public InfoItem.InfoType expectedInfoItemType() { return InfoItem.InfoType.STREAM; }
    }

    /** Misspelled query: the service should answer with a spelling suggestion. */
    public static class Suggestion extends DefaultSearchExtractorTest {
        private static SearchExtractor extractor;
        private static final String QUERY = "pewdeipie"; // deliberate typo
        private static final String EXPECTED_SUGGESTION = "pewdiepie";

        @BeforeClass
        public static void setUp() throws Exception {
            NewPipe.init(DownloaderTestImpl.getInstance());
            extractor = YouTube.getSearchExtractor(QUERY, singletonList(VIDEOS), "");
            extractor.fetchPage();
        }

        // Expected values consumed by the base-class assertions.
        @Override public SearchExtractor extractor() { return extractor; }
        @Override public StreamingService expectedService() { return YouTube; }
        @Override public String expectedName() { return QUERY; }
        @Override public String expectedId() { return QUERY; }
        @Override public String expectedUrlContains() { return "youtube.com/results?search_query=" + QUERY; }
        @Override public String expectedOriginalUrlContains() { return "youtube.com/results?search_query=" + QUERY; }
        @Override public String expectedSearchString() { return QUERY; }
        @Nullable @Override public String expectedSearchSuggestion() { return EXPECTED_SUGGESTION; }

        @Override public InfoItem.InfoType expectedInfoItemType() { return InfoItem.InfoType.STREAM; }
    }

    /** Query with few results: paging should end after one empty page. */
    public static class RandomQueryNoMorePages extends DefaultSearchExtractorTest {
        private static SearchExtractor extractor;
        private static final String QUERY = "UCO6AK";

        @BeforeClass
        public static void setUp() throws Exception {
            NewPipe.init(DownloaderTestImpl.getInstance());
            extractor = YouTube.getSearchExtractor(QUERY);
            extractor.fetchPage();
        }

        // Expected values consumed by the base-class assertions.
        @Override public SearchExtractor extractor() { return extractor; }
        @Override public StreamingService expectedService() { return YouTube; }
        @Override public String expectedName() { return QUERY; }
        @Override public String expectedId() { return QUERY; }
        @Override public String expectedUrlContains() { return "youtube.com/results?search_query=" + QUERY; }
        @Override public String expectedOriginalUrlContains() { return "youtube.com/results?search_query=" + QUERY; }
        @Override public String expectedSearchString() { return QUERY; }
        @Nullable @Override public String expectedSearchSuggestion() { return null; }

        /*//////////////////////////////////////////////////////////////////////////
        // Test Overrides
        //////////////////////////////////////////////////////////////////////////*/

        @Test
        public void testMoreRelatedItems() throws Exception {
            // YouTube actually gives us an empty next page, but after that, no more pages.
            assertTrue(extractor.hasNextPage());
            final ListExtractor.InfoItemsPage<InfoItem> nextEmptyPage = extractor.getPage(extractor.getNextPageUrl());
            assertEquals(0, nextEmptyPage.getItems().size());
            assertEmptyErrors("Empty page has errors", nextEmptyPage.getErrors());

            assertFalse("More items available when it shouldn't", nextEmptyPage.hasNextPage());
            final String nextPageUrl = nextEmptyPage.getNextPageUrl();
            assertTrue("Next page is not empty or null", nextPageUrl == null || nextPageUrl.isEmpty());
        }
    }

    /** Cross-page check: two consecutive result pages must not repeat items. */
    public static class PagingTest {
        @Test
        public void duplicatedItemsCheck() throws Exception {
            NewPipe.init(DownloaderTestImpl.getInstance());
            final SearchExtractor extractor = YouTube.getSearchExtractor("cirque du soleil", singletonList(VIDEOS), "");
            extractor.fetchPage();

            final ListExtractor.InfoItemsPage<InfoItem> page1 = extractor.getInitialPage();
            final ListExtractor.InfoItemsPage<InfoItem> page2 = extractor.getPage(page1.getNextPageUrl());

            assertNoDuplicatedItems(YouTube, page1, page2);
        }
    }
}
|
|
@ -1,69 +0,0 @@
|
||||||
package org.schabi.newpipe.extractor.services.youtube.search;
|
|
||||||
|
|
||||||
import org.junit.BeforeClass;
|
|
||||||
import org.junit.Test;
|
|
||||||
import org.schabi.newpipe.DownloaderTestImpl;
|
|
||||||
import org.schabi.newpipe.extractor.InfoItem;
|
|
||||||
import org.schabi.newpipe.extractor.ListExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.NewPipe;
|
|
||||||
import org.schabi.newpipe.extractor.services.youtube.extractors.YoutubeSearchExtractor;
|
|
||||||
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeSearchQueryHandlerFactory;
|
|
||||||
|
|
||||||
import java.util.HashSet;
|
|
||||||
import java.util.List;
|
|
||||||
import java.util.Set;
|
|
||||||
|
|
||||||
import static java.util.Collections.singletonList;
|
|
||||||
import static org.junit.Assert.*;
|
|
||||||
import static org.schabi.newpipe.extractor.ServiceList.YouTube;
|
|
||||||
|
|
||||||
/**
 * Paging test for the YouTube search extractor: fetches two consecutive
 * result pages for one query and asserts that neither page contains
 * duplicate items and that the two pages do not overlap.
 * NOTE(review): hits the live service; the size thresholds below depend
 * on YouTube's current result counts.
 */
public class YoutubeSearchPagingTest {
    // Shared fixtures, populated once in setUpClass.
    private static ListExtractor.InfoItemsPage<InfoItem> page1;
    private static ListExtractor.InfoItemsPage<InfoItem> page2;
    private static Set<String> urlList1; // unique item URLs on page one
    private static Set<String> urlList2; // unique item URLs on page two
    private static int page1Size;
    private static int page2Size;

    @BeforeClass
    public static void setUpClass() throws Exception {
        NewPipe.init(DownloaderTestImpl.getInstance());

        YoutubeSearchExtractor extractor = (YoutubeSearchExtractor) YouTube.getSearchExtractor("cirque du soleil",
                singletonList(YoutubeSearchQueryHandlerFactory.VIDEOS), null);

        extractor.fetchPage();
        page1 = extractor.getInitialPage();
        urlList1 = extractUrls(page1.getItems());
        assertTrue("failed to load search result page one: too few items", 15 < page1.getItems().size());
        page1Size = page1.getItems().size();
        // A set collapses duplicates, so equal sizes means all URLs are unique.
        assertEquals("duplicated items in search result on page one", page1Size, urlList1.size());

        assertTrue("search result has no second page", page1.hasNextPage());
        assertNotNull("next page url is null", page1.getNextPageUrl());
        page2 = extractor.getPage(page1.getNextPageUrl());
        urlList2 = extractUrls(page2.getItems());
        page2Size = page2.getItems().size();
    }

    // Collects the URL of every item into a set (dropping duplicates).
    private static Set<String> extractUrls(List<InfoItem> list) {
        Set<String> result = new HashSet<>();
        for (InfoItem item : list) {
            result.add(item.getUrl());
        }
        return result;
    }

    @Test
    public void secondPageUniqueVideos() {
        // Same size-vs-set comparison as page one, applied to page two.
        assertEquals("Second search result page has duplicated items", page2Size, urlList2.size());
    }

    @Test
    public void noRepeatingVideosInPages() {
        // Any item present on both pages would survive the intersection.
        Set<String> intersection = new HashSet<>(urlList2);
        intersection.retainAll(urlList1);
        assertEquals("Found the same item on first AND second search page", 0, intersection.size());
    }
}
|
|
Loading…
Reference in New Issue