sort($cats);
$cats = array_unique($cats);

// Gather every page in the requested categories, de-duplicated by page id
// (keying the map by id means later duplicates simply overwrite earlier ones).
$pages = array();
foreach ($cats as $cat) {
    $results = WikiPhoto::getPages($dbr, $cat);
    foreach ($results as $result) {
        $pages[$result['id']] = $result;
    }
}
$pages = array_values($pages);
shuffle($pages);

// Pick up to $numArticles random pages whose article body has no images yet.
// Each output row is (full URL, page id).
$lines = array();
foreach ($pages as $page) {
    if (WikiPhoto::articleBodyHasNoImages($dbr, $page['id'])) {
        $lines[] = array(WikiPhoto::BASE_URL . "{$page['key']}", $page['id']);
        if (count($lines) >= $numArticles) {
            break;
        }
    }
}

// Write the selected rows out as CSV.
$fp = fopen($file, 'w');
if ($fp) {
    foreach ($lines as $line) {
        fputcsv($fp, $line);
    }
    fclose($fp);
    print "output is in {$file}\n";
} else {
    print "unable to open file {$file}\n";
} // fix: this closing brace was missing in the original (else block left open)
Esempio n. 2
0
 /**
  * Execute special page.  Only available to wikihow staff.
  *
  * POST: reads the 'pages-list' request value as newline-separated URLs,
  * resolves each to an article id, checks whether the article body lacks
  * step images, and prints a JSON object whose 'result' key holds an HTML
  * results table.
  * GET: renders the AdminLookupPages form template.
  */
 function execute()
 {
     global $wgRequest, $wgOut, $wgUser, $wgLang;
     //$userGroups = $wgUser->getGroups();

     // Blocked users get the generic "no such special page" error.
     if ($wgUser->isBlocked()) {
         $wgOut->setRobotPolicy('noindex,nofollow');
         $wgOut->errorPage('nosuchspecialpage', 'nospecialpagetext');
         return;
     }

     if ($wgRequest->wasPosted()) {
         $dbr = wfGetDB(DB_SLAVE);
         $pageList = $wgRequest->getVal('pages-list', '');
         $wgOut->setArticleBodyOnly(true);
         $pageList = preg_split('@[\\r\\n]+@', $pageList);

         // fix: $urls was used uninitialized when the posted list was empty,
         // making the rendering foreach below iterate an undefined variable.
         $urls = array();
         foreach ($pageList as $url) {
             $url = trim($url);
             if (!empty($url)) {
                 $id = WikiPhoto::getArticleID($url);
                 $images = '';
                 if (!empty($id)) {
                     $hasNoImages = WikiPhoto::articleBodyHasNoImages($dbr, $id);
                     $images = $hasNoImages ? 'no' : 'yes';
                 }
                 $urls[] = array('url' => $url, 'id' => $id, 'images' => $images);
             }
         }

         $html = '<style>.tres tr:nth-child(even) {background: #ccc;}</style>';
         $html .= '<table class="tres"><tr><th width="450px">URL</th><th>ID</th><th>Has steps images?</th></tr>';
         foreach ($urls as $row) {
             // fix: escape the user-supplied URL before embedding it in both
             // the href attribute and the link text (stored/reflected XSS).
             $safeUrl = htmlspecialchars($row['url'], ENT_QUOTES, 'UTF-8');
             $html .= "<tr><td><a href='{$safeUrl}'>{$safeUrl}</a></td><td>{$row['id']}</td><td>{$row['images']}</td></tr>";
         }
         $html .= '</table>';

         $result = array('result' => $html);
         print json_encode($result);
         return;
     }

     $wgOut->setHTMLTitle('Admin - Lookup Pages - wikiHow');
     $tmpl = self::getGuts('AdminLookupPages');
     $wgOut->addHTML($tmpl);
 }
<?php

/*
 * Opens a CSV file and adds page IDs to the file.  Part of the WikiPhoto 
 * project.
 */
require_once 'commandLine.inc';
// Expect exactly two arguments: input CSV path and output CSV path.
// (commandLine.inc has already stripped the script name from $argv.)
if (count($argv) < 2) {
    print "usage: php wikiphotoAddPageIDs.php infile.csv outfile.csv\n";
    exit;
}
$infile = $argv[0];
$outfile = $argv[1];

$in = fopen($infile, 'r');
$out = fopen($outfile, 'w');
if (!$in || !$out) {
    // fix: name the file that failed so the operator can tell which path is bad
    // (the original printed only "error: opening a file").
    $bad = !$in ? $infile : $outfile;
    print "error: unable to open file {$bad}\n";
    exit;
}
$dbr = wfGetDB(DB_SLAVE);

// Copy each input row through to the output, filling column 1 with the
// resolved article id and column 2 with a 1/0 flag (1 = body has images,
// 0 = no images; left empty when the URL does not resolve to an article).
while (($data = fgetcsv($in)) !== false) {
    $id = WikiPhoto::getArticleID($data[0]);
    $data[1] = $id;
    $data[2] = '';
    if (!empty($id)) {
        $hasNoImages = WikiPhoto::articleBodyHasNoImages($dbr, $id);
        $data[2] = intval(!$hasNoImages);
    }
    fputcsv($out, $data);
}

// fix: close both handles explicitly instead of relying on interpreter
// shutdown, so the output file is flushed deterministically.
fclose($in);
fclose($out);