/// <summary>
/// Scrapes a Douban-style group listing page and appends one <see cref="CrawlerResult"/>
/// per topic row to <paramref name="arrayList"/>.
/// </summary>
/// <param name="task_ID">Crawler task identifier stamped onto every produced item.</param>
/// <param name="arrayList">Output collection; parsed items are appended in page order.</param>
/// <param name="group_url">URL of the listing page to download (fetched as UTF-8).</param>
private void GetItemList(int task_ID, List<CrawlerResult> arrayList, string group_url)
{
    MatchCollection matchList;
    MatchCollection tempMatch;
    DownWebFile df = new DownWebFile();
    var _html = df.GetPageData(group_url, "utf-8");

    // Page structure: one <table class="olt"> holding the topic rows,
    // each row a <tr class=""> with a title cell, a link, and an author cell.
    Regex regExtractHtml = new Regex(@"<table class=""olt"">[\s\S]+?</table>", RegexOptions.None);
    Regex regLi = new Regex(@"<tr class="""">[\s\S]+?</tr>", RegexOptions.None);
    Regex regexTitle = new Regex(@"<td class=""title"">[\s\S]+?</td>", RegexOptions.None);
    Regex regexHref = new Regex(@"<a [^>]+?>[\s\S]+?</a>", RegexOptions.None);
    Regex regexAuthor = new Regex(@"<td Nowrap=""Nowrap"">[\s\S]+?</td>", RegexOptions.None);

    matchList = regExtractHtml.Matches(_html);
    if (matchList.Count < 1)
        return; // listing table not found — nothing to parse
    var _contentHtml = matchList[0].Value;

    matchList = regLi.Matches(_contentHtml);
    for (int i = 0; i < matchList.Count; i++)
    {
        var _text = matchList[i].Value;
        CrawlerResult item = new CrawlerResult();
        item.Task_ID = task_ID;

        tempMatch = regexTitle.Matches(_text);
        // BUGFIX: was "Count < 0", which is never true — a row without a title cell
        // would then throw on tempMatch[0] instead of being skipped.
        if (tempMatch.Count < 1)
            continue;
        item.Title = CommonFunction.DeleteHTMLElement(tempMatch[0].Value);

        tempMatch = regexHref.Matches(tempMatch[0].Value);
        // BUGFIX: same dead "Count < 0" guard — skip rows whose title cell has no anchor.
        if (tempMatch.Count < 1)
            continue;
        item.Url = GetURL(tempMatch[0].Value);
        // Ordinal search (CA1310): this is a machine check, not linguistic text.
        if (item.Url.IndexOf("http://", StringComparison.Ordinal) < 0)
            continue; // relative or malformed link — skip

        tempMatch = regexAuthor.Matches(_text);
        if (tempMatch.Count > 0)
        {
            item.Author = CommonFunction.DeleteHTMLElement(tempMatch[0].Value);
        }

        item.SiteName = "XX";
        item.FilterType = IWOMWebCrawlerDbLayer.Common.FilterType.FilterNo;
        arrayList.Add(item);
    }
}
版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。