# -- Fragment: tail of an input-gathering loop (the loop opening is not visible
# -- in this chunk), followed by the per-format setup and the article-list
# -- conversion driver. Reformatted for readability; code tokens unchanged.
# Skip blank input lines; keep everything else as one entry per line.
if ($l == '') {
    continue;
}
$wikitext[] = $l;
}
# Re-join the surviving lines into a single newline-separated string and mark
# the input as a list of article titles (as opposed to raw wikitext).
$wikitext = implode("\n", $wikitext);
$whatsthis = 'articlelist';
}
# QUICK HACK! NEEDS TO WORK!
# NOTE(review): presumably XML temp files break the ODT/DocBook export
# pipelines, so they are forced off for those formats — confirm upstream.
if ($format == "odt" || $format == "odt_xml" || $format == "docbook_pdf" || $format == "docbook_html" || $format == "docbook_xml") {
    $xmlg["allow_xml_temp_files"] = false;
}
if ($whatsthis == "wikitext") {
    # Raw wikitext mode: convert the single text blob directly under a
    # placeholder title; no article lookup is involved.
    $content_provider->first_title = "Raw wikitext page";
    $wiki2xml_authors = array();
    $xml = $converter->article2xml("", $wikitext, $xmlg);
} else {
    # Article-list mode: only enable temp files if the format allows them.
    if ($xmlg['allow_xml_temp_files']) {
        $xmlg['use_xml_temp_files'] = true;
    }
    # One article title per line; queue each for conversion.
    foreach (explode("\n", $wikitext) as $a) {
        push_article($aArticles, $a);
    }
    # set the first article name as the default title
    if ($xmlg["book_title"] == '') {
        $xmlg["book_title"] = $aArticles[0];
    }
    # as long as we have articles to convert (this might change in between!)
    while ($a = array_shift($aArticles)) {
        $wiki2xml_authors = array();
        # Article page|Article name
        # (fragment ends mid-loop; the loop body continues beyond this chunk)
# -- Fragment: tail of a get_param()-style helper (its header is not visible
# -- in this chunk), followed by the MAIN request-handling script.
# -- Reformatted for readability; code tokens unchanged.
# Fall back to the caller-supplied default when the key is absent; otherwise
# return the request value.  NOTE(review): the value comes straight from
# $_REQUEST and is not sanitised here — callers must escape appropriately.
return $default;
}
return $_REQUEST[$key];
}
# MAIN
@set_time_limit(0); # No time limit
# Global converter configuration: base URL placeholder, template resolution
# on, and the (German) template namespace name "Vorlage".
$xmlg = array('site_base_url' => "SBU", 'resolvetemplates' => true, 'templates' => array(), 'namespace_template' => 'Vorlage');
$content_provider = new ContentProviderTextFile();
$converter = new MediaWikiConverter();
# Requested page title; defaults to "Main Page" (default is URL-encoded, then
# the whole value is decoded once).
$title = urldecode(get_param('title', urlencode('Main Page')));
$xmlg['page_title'] = $title;
# Output format selector; defaults to XHTML.
$format = strtolower(get_param('format', 'xhtml'));
# NOTE(review): $base_text_dir is assumed to be defined earlier in the file —
# TODO confirm; it is not visible in this chunk.
$content_provider->basedir = $base_text_dir;
# Fetch the wikitext for the requested title and convert it to XML once; the
# format branches below render that XML in different ways.
$text = $content_provider->get_wiki_text($title);
$xml = $converter->article2xml($title, $text, $xmlg);
if ($format == "xml") { # XML
    # Emit the intermediate XML verbatim.
    header('Content-type: text/xml; charset=utf-8');
    print "<?xml version='1.0' encoding='UTF-8' ?>\n";
    print $xml;
} else {
    if ($format == "text") { # Plain text
        # Plain-text rendering, served as HTML with <br/> line breaks.
        $xmlg['plaintext_markup'] = true;
        $xmlg['plaintext_prelink'] = true;
        $out = $converter->articles2text($xml, $xmlg);
        $out = str_replace("\n", "<br/>", $out);
        header('Content-type: text/html; charset=utf-8');
        print $out;
    } else {
        # (fragment ends here; the remaining format handlers continue beyond
        # this chunk)
# -- Fragment: parser-test harness (enclosing context not visible in this
# -- chunk). Reformatted for readability; code tokens unchanged.
# Converter options for the test run: no templates, no GFDL footer, keep
# interlanguage links and categories, plain XHTML output.
$xmlg['templates'] = array();
$xmlg['add_gfdl'] = false;
$xmlg['keep_interlanguage'] = true;
$xmlg['keep_categories'] = true;
$xmlg['xml_articles_header'] = "<articles>";
$xmlg['xhtml_justify'] = false;
$xmlg['xhtml_logical_markup'] = false;
$xmlg['xhtml_source'] = false;
$cnt = 1;
# Results table header, printed directly to the response.
print "<table border=1 width='100%'><tr><th>Test</th><th>Result</th><th>wiki2xml</th><th>Input</th><th>XML</th></tr>";
foreach ($tests as $t) {
    $res = $t->result;
    $col = '';
    # Fresh provider/converter per test case so no state leaks between tests.
    $content_provider = new ContentProviderHTTP();
    $converter = new MediaWikiConverter();
    # Convert the test input to XML, then render that XML as XHTML.
    $xml = $converter->article2xml("", $t->input, $xmlg);
    $nr = $converter->articles2xhtml($xml, $xmlg);
    # Extract just the <body>…</body> contents of the generated XHTML.
    # NOTE(review): passing explode() directly to array_pop()/array_shift()
    # raises "Only variables should be passed by reference" notices on
    # modern PHP — assign to a temporary first if this is ever modernised.
    $nr = array_pop(explode('<body>', $nr, 2));
    $nr = array_shift(explode('</body>', $nr, 2));
    # Fixing things to compare to the stupid parser test formatting
    $res = trim($res);
    $res = str_replace("<li> ", "<li>", $res);
    $res = str_replace("<dd> ", "<dd>", $res);
    $res = str_replace("\n<", "<", $res);
    $res = str_replace("\n", " ", $res);
    $res = str_replace(" </p>", "</p>", $res);
    # NOTE(review): replacing a single space with a single space is a no-op,
    # so this loop always exits after one pass. The original most likely
    # collapsed runs of spaces ("  " -> " ") and the double space was lost
    # when this file's whitespace was mangled — confirm against upstream
    # before changing.
    do {
        $o = $res;
        $res = str_replace(" ", " ", $res);
    } while ($o != $res);
    $nr = trim($nr);
    # (fragment ends mid-loop; the comparison and table output continue
    # beyond this chunk)