package net.javacoding.jspider.core.rule.impl;

import net.javacoding.jspider.api.model.Decision;
import net.javacoding.jspider.api.model.Site;
import net.javacoding.jspider.core.SpiderContext;
import net.javacoding.jspider.core.model.DecisionInternal;

import java.net.URL;

/**
 * Rule implementation used when the robots.txt file was skipped for a site;
 * since no robots.txt restrictions apply in that case, every resource is
 * accepted.
 *
 * $Id: RobotsTXTSkippedRule.java,v 1.2 2003/04/03 15:57:16 vanrogu Exp $
 */
public class RobotsTXTSkippedRule extends BaseRuleImpl {

    /**
     * Applies the rule to the given URL.
     * @param context the context we're spidering under
     * @param currentSite the site we're currently spidering
     * @param url the URL to be evaluated
     * @return Decision object indicating whether the URL should be accepted.
     * This implementation always returns an accept decision, since the
     * robots.txt file was skipped for the site.
     */
    public Decision apply(SpiderContext context, Site currentSite, URL url) {
        return new DecisionInternal(Decision.RULE_ACCEPT,
            "robots.txt was skipped for site - so all resources are accepted");
    }
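
    /*
     * Minimal usage sketch (an illustration, not part of the original source):
     * "context" and "site" stand for a SpiderContext and Site that would be
     * supplied by the running spider engine; they are hypothetical
     * placeholders here.
     *
     *   RobotsTXTSkippedRule rule = new RobotsTXTSkippedRule();
     *   Decision decision = rule.apply(context, site,
     *       new URL("http://example.org/index.html"));
     *   // the returned decision always carries Decision.RULE_ACCEPT,
     *   // regardless of the URL that was passed in
     */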

}