/**
 * Check for modifications in the database and update the changelog
 * table accordingly.
 */
public static function update_clog_table()
{
    # Compute "now" minus one minute up front (see issue 157), so rows
    # modified while this method runs are picked up on the next run.
    $now = Db::select_value("select date_add(now(), interval -1 minute)");
    $last_update = Okapi::get_var('last_clog_update');
    if ($last_update === null) {
        $last_update = Db::select_value("select date_add(now(), interval -1 day)");
    }

    # Usually this will be fast. But, for example, if an admin changes ALL
    # the caches, this can take very long. We still want it to finish
    # properly without interruption.
    set_time_limit(0);
    ignore_user_abort(true);

    # Fetch the codes of all modified caches and process them in groups.
    # Note that we deliberately include ALL cache codes here, not only
    # "status in (1,2,3)". This way, when a cache changes its status,
    # e.g. from 3 to 6, the changelog will include a proper "delete"
    # statement.
    $modified_cache_codes = Db::select_column("
        select wp_oc
        from caches
        where okapi_syncbase > '".Db::escape_string($last_update)."';
    ");
    $code_groups = Okapi::make_groups($modified_cache_codes, 50);
    unset($modified_cache_codes);
    foreach ($code_groups as $group) {
        self::generate_changelog_entries(
            'services/caches/geocaches', 'geocache', 'cache_codes',
            'code', $group, self::$logged_cache_fields, false, true, null
        );
    }

    # Same as above, for log entries (paged, 10000 rows at a time).
    $offset = 0;
    while (true) {
        $log_uuids = Db::select_column("
            select uuid
            from cache_logs
            where okapi_syncbase > '".Db::escape_string($last_update)."'
            limit {$offset}, 10000;
        ");
        if (count($log_uuids) == 0) {
            break;
        }
        $offset += 10000;
        $uuid_groups = Okapi::make_groups($log_uuids, 100);
        unset($log_uuids);
        foreach ($uuid_groups as $group) {
            self::generate_changelog_entries(
                'services/logs/entries', 'log', 'log_uuids',
                'uuid', $group, self::$logged_log_entry_fields, false, true, 3600
            );
        }
    }

    if (Settings::get('OC_BRANCH') == 'oc.de') {
        # On OCDE branch, deleted log entries are MOVED to another table.
        # So the queries above won't detect them. We need to run one more.
        # We assume there are not so many of them, so we don't have to
        # split them in groups of 10000 as we did above.
        $archived_uuids = Db::select_column("
            select uuid
            from cache_logs_archived
            where okapi_syncbase > '".Db::escape_string($last_update)."'
        ");
        $archived_groups = Okapi::make_groups($archived_uuids, 100);
        unset($archived_uuids);
        foreach ($archived_groups as $group) {
            self::generate_changelog_entries(
                'services/logs/entries', 'log', 'log_uuids',
                'uuid', $group, self::$logged_log_entry_fields, false, true, 3600
            );
        }
    }

    # Update state variables.
    Okapi::set_var("last_clog_update", $now);
    $revision = Db::select_value("select max(id) from okapi_clog");
    Okapi::set_var("clog_revision", $revision);
}
/**
 * Return the current OKAPI database schema version, or 0 when OKAPI has
 * not been installed yet (i.e. the okapi_vars table does not exist).
 */
public static function get_current_version()
{
    try {
        # "+ 0" forces a numeric return value.
        return Okapi::get_var('db_version', 0) + 0;
    } catch (Exception $e) {
        # A missing okapi_vars table means a fresh installation -> version 0.
        # Anything else is unexpected and must propagate.
        if (strpos($e->getMessage(), "okapi_vars' doesn't exist") === false) {
            throw $e;
        }
        return 0;
    }
}
/**
 * Check if any cron-5 cronjobs are scheduled to execute and execute
 * them if needed. Reschedule for new executions.
 */
public static function execute_cron5_cronjobs()
{
    $nearest_event = Okapi::get_var("cron_nearest_event");
    if ($nearest_event + 0 > time()) {
        # Nothing is due yet.
        return;
    }
    # Jobs may run long; make sure we are not interrupted mid-way.
    set_time_limit(0);
    ignore_user_abort(true);
    require_once $GLOBALS['rootpath']."okapi/cronjobs.php";
    # run_jobs returns the timestamp of the next scheduled execution.
    Okapi::set_var("cron_nearest_event", CronJobController::run_jobs('cron-5'));
}
/**
 * Render a plain-text cronjob status report (hidden developer page).
 *
 * The report lists the next scheduled event, every enabled cronjob with
 * its schedule state, the last crontab ping, and the cached daily clog
 * revisions. Returns an OkapiHttpResponse with the report as its body.
 */
public static function call()
{
    # This is a hidden page for OKAPI developers. It will output a cronjobs
    # report. This is useful for debugging.
    $response = new OkapiHttpResponse();
    $response->content_type = "text/plain; charset=utf-8";
    # Everything printed below is captured into the response body.
    ob_start();
    require_once $GLOBALS['rootpath'] . "okapi/cronjobs.php";
    $schedule = Cache::get("cron_schedule");
    if ($schedule == null) {
        $schedule = array();
    }
    print "Nearest event: ";
    if (Okapi::get_var('cron_nearest_event')) {
        print "in " . (Okapi::get_var('cron_nearest_event') - time()) . " seconds.\n\n";
    } else {
        print "NOT SET\n\n";
    }
    $cronjobs = CronJobController::get_enabled_cronjobs();
    # Sort by type first, then by name, for a stable report layout.
    usort($cronjobs, function ($a, $b) {
        $cmp = function ($a, $b) {
            return $a < $b ? -1 : ($a > $b ? 1 : 0);
        };
        $by_type = $cmp($a->get_type(), $b->get_type());
        if ($by_type != 0) {
            return $by_type;
        }
        return $cmp($a->get_name(), $b->get_name());
    });
    print str_pad("TYPE", 11) . " " . str_pad("NAME", 40) . " SCHEDULE\n";
    print str_pad("----", 11) . " " . str_pad("----", 40) . " --------\n";
    foreach ($cronjobs as $cronjob) {
        $type = $cronjob->get_type();
        $name = $cronjob->get_name();
        print str_pad($type, 11) . " " . str_pad($name, 40) . " ";
        # A job can be: never scheduled, overdue, or pending in the future.
        if (!isset($schedule[$name])) {
            print "NOT YET SCHEDULED\n";
        } elseif ($schedule[$name] <= time()) {
            print "DELAYED: should be run " . (time() - $schedule[$name]) . " seconds ago\n";
        } else {
            print "scheduled to run in " . str_pad($schedule[$name] - time(), 6, " ", STR_PAD_LEFT) . " seconds\n";
        }
    }
    print "\n";
    print "Crontab last ping: ";
    if (Cache::get('crontab_last_ping')) {
        print time() - Cache::get('crontab_last_ping') . " seconds ago";
    } else {
        print "NEVER";
    }
    print " (crontab_check_counter: " . Cache::get('crontab_check_counter') . ").\n";
    print "clog_revisions_daily: ";
    if (Cache::get('clog_revisions_daily')) {
        # Presumably keyed by timestamp; only the revisions are printed.
        foreach (Cache::get('clog_revisions_daily') as $time => $rev) {
            print "{$rev} ";
        }
        print "\n";
    } else {
        print "NULL\n";
    }
    $response->body = ob_get_clean();
    return $response;
}
/**
 * Forward fresh changelog entries to the TileTree (map) subsystem.
 *
 * Compares the current changelog revision ('clog_revision') with the
 * revision already consumed by the tile tree ('clog_followup_revision')
 * and feeds the missing entries to the ReplicateListener in chunks. On
 * big gaps, or when the 'since' value is no longer valid, the tile tree
 * is rebuilt from scratch instead.
 */
public function execute()
{
    $current_clog_revision = Okapi::get_var('clog_revision', 0);
    $tiletree_revision = Okapi::get_var('clog_followup_revision', 0);
    if ($tiletree_revision === $current_clog_revision) {
        # No update necessary.
    } elseif ($tiletree_revision < $current_clog_revision) {
        require_once $GLOBALS['rootpath'] . "okapi/services/caches/map/replicate_listener.inc.php";
        if ($current_clog_revision - $tiletree_revision < 30000) {
            # Process the changelog in chunks, but for no longer than ~4
            # minutes in total; the rest is handled on the next run.
            for ($timeout = time() + 240; time() < $timeout;) {
                try {
                    $response = OkapiServiceRunner::call(
                        'services/replicate/changelog',
                        new OkapiInternalRequest(
                            new OkapiInternalConsumer(), null,
                            array('since' => $tiletree_revision)
                        )
                    );
                    \okapi\services\caches\map\ReplicateListener::receive($response['changelog']);
                    $tiletree_revision = $response['revision'];
                    Okapi::set_var('clog_followup_revision', $tiletree_revision);
                    if (!$response['more']) {
                        break;
                    }
                } catch (BadRequest $e) {
                    # Invalid 'since' parameter? May happen when crontab was
                    # not working for more than 10 days. Or, just after OKAPI
                    # is installed (and this is the first time this cronjob
                    # is being run). Reset the tile tree and fast-forward.
                    # (Removed a dead "$mail_admins" local that was computed
                    # here but never used.)
                    \okapi\services\caches\map\ReplicateListener::reset();
                    Okapi::set_var('clog_followup_revision', $current_clog_revision);
                    break;
                }
            }
        } else {
            # Some kind of bigger update. Resetting TileTree might be a better option.
            \okapi\services\caches\map\ReplicateListener::reset();
            Okapi::set_var('clog_followup_revision', $current_clog_revision);
        }
    }
}
/**
 * Scan the database and compare the current values of old entries to
 * the cached values of the same entries. If differences found, update
 * okapi_syncbase accordingly, and email the OKAPI developers.
 *
 * Currently, only caches are checked (log entries are not).
 *
 * @param bool $force_all verify ALL caches, not only those untouched
 *     for at least one day.
 * @param array|null $geocache_ignored_fields field names to strip from
 *     'replace' entries before deciding whether a cache changed (used
 *     by database update scripts after adding new replicate fields).
 */
public static function verify_clog_consistency($force_all = false, $geocache_ignored_fields = null)
{
    set_time_limit(0);
    ignore_user_abort(true);

    # We will SKIP the entries which have been modified SINCE one day ago.
    # Such entries might have not been seen by the update_clog_table() yet
    # (e.g. other long-running cronjob is preventing update_clog_table from
    # running).
    #
    # If $force_all is true, then all caches will be verified. This is
    # quite important when used in conjunction with ignored_fields.
    $cache_codes = Db::select_column("
        select wp_oc
        from caches
        ".($force_all ? "" : "where okapi_syncbase < date_add(now(), interval -1 day)")."
    ");
    $cache_code_groups = Okapi::make_groups($cache_codes, 50);
    unset($cache_codes);

    # For each group, get the changelog entries, but don't store them
    # (the "fulldump" mode). Instead, just update the okapi_syncbase column.
    $sum = 0;
    $two_examples = array();
    foreach ($cache_code_groups as $cache_codes) {
        $entries = self::generate_changelog_entries(
            'services/caches/geocaches', 'geocache', 'cache_codes',
            'code', $cache_codes, self::$logged_cache_fields, true, true, null
        );
        foreach ($entries as $entry) {
            if ($entry['object_type'] != 'geocache') {
                continue;
            }
            $cache_code = $entry['object_key']['code'];
            if ($entry['change_type'] == 'replace' && $geocache_ignored_fields != null) {
                # We were called with a non-null ignored fields. Probably
                # this call originated from the database update script
                # and new fields have been added to the replicate module.
                # We will ignore such new fields - this way no unnecessary
                # clog entries will be created.
                foreach ($geocache_ignored_fields as $field) {
                    unset($entry['data'][$field]);
                }
                if (count($entry['data']) == 0) {
                    # Skip this geocache. Nothing was changed here, only
                    # new fields have been added.
                    continue;
                }
            }
            # We will store the first and the last entry in the $two_examples
            # var which is to be emailed to OKAPI developers.
            if (count($two_examples) == 0) {
                $two_examples[0] = $entry;  /* The first entry */
            }
            $two_examples[1] = $entry;  /* The last entry */

            # BUGFIX: this previously used mysql_real_escape_string(), which
            # requires a legacy ext/mysql connection and was removed in
            # PHP 7.0. Use the same Db::escape_string() helper as every
            # other query in this class.
            Db::execute("
                update caches
                set okapi_syncbase = now()
                where wp_oc = '".Db::escape_string($cache_code)."'
            ");
            $sum += 1;
        }
    }
    if ($sum > 0) {
        $message = "Number of invalid entries scheduled to be fixed: {$sum}\n"
            . "Approx revision of the first one: " . Okapi::get_var('clog_revision') . "\n\n"
            . "Two examples:\n\n" . print_r($two_examples, true);
        Okapi::mail_from_okapi(
            "*****@*****.**",
            "verify_clog_consistency - " . Okapi::get_normalized_site_name(),
            $message, true
        );
    }
}