/**
 * Run any cron-5 cronjobs whose scheduled time has arrived, and store
 * the timestamp of the next upcoming execution.
 */
public static function execute_cron5_cronjobs()
{
    $next_run = Okapi::get_var("cron_nearest_event");
    # get_var may return null on first run; "+ 0" coerces it to 0 so the
    # jobs are executed immediately in that case.
    if ($next_run + 0 > time()) {
        # Nothing is due yet.
        return;
    }
    # Job execution may take a while; make sure we are not interrupted.
    set_time_limit(0);
    ignore_user_abort(true);
    require_once $GLOBALS['rootpath'] . "okapi/cronjobs.php";
    $next_run = CronJobController::run_jobs('cron-5');
    Okapi::set_var("cron_nearest_event", $next_run);
}
/**
 * Check for modifications in the database and update the changelog table
 * accordingly.
 *
 * Scans `caches`, `cache_logs` (and, on the oc.de branch, the
 * `cache_logs_archived` table) for rows whose `okapi_syncbase` is newer
 * than the last recorded run, emits changelog entries for them in groups,
 * then advances the 'last_clog_update' and 'clog_revision' state variables.
 */
public static function update_clog_table()
{
    # Upper bound for this run is "now minus 1 minute", not "now". # See issue 157.
    $now = Db::select_value("select date_add(now(), interval -1 minute)"); # See issue 157.
    $last_update = Okapi::get_var('last_clog_update');
    if ($last_update === null) {
        # First run ever: look one day back only.
        $last_update = Db::select_value("select date_add(now(), interval -1 day)");
    }

    # Usually this will be fast. But, for example, if admin changes ALL the
    # caches, this will take forever. But we still want it to finish properly
    # without interruption.
    set_time_limit(0);
    ignore_user_abort(true);

    # Get the list of modified cache codes. Split it into groups of N cache codes.
    # Note that we should include ALL cache codes in this particular query, not
    # only "status in (1,2,3)". This way, when the cache changes its status, e.g.
    # from 3 to 6, changelog will include a proper "delete" statement.
    # NOTE(review): mysql_real_escape_string was removed in PHP 7 and needs an
    # open ext/mysql connection — confirm the target PHP version / migrate to
    # the project's Db escaping helper.
    $cache_codes = Db::select_column("\n select wp_oc\n from caches\n where okapi_syncbase > '" . mysql_real_escape_string($last_update) . "';\n ");
    $cache_code_groups = Okapi::make_groups($cache_codes, 50);
    unset($cache_codes);

    # For each group, update the changelog table accordingly.
    foreach ($cache_code_groups as $cache_codes) {
        self::generate_changelog_entries('services/caches/geocaches', 'geocache', 'cache_codes', 'code', $cache_codes, self::$logged_cache_fields, false, true, null);
    }

    # Same as above, for log entries. Log entries are paged 10000 at a time.
    # NOTE(review): LIMIT/OFFSET without an ORDER BY has unspecified row
    # order; if the result order changes between iterations, uuids may be
    # skipped or processed twice — TODO confirm whether this matters here.
    $offset = 0;
    while (true) {
        $log_uuids = Db::select_column("\n select uuid\n from cache_logs\n where okapi_syncbase > '" . mysql_real_escape_string($last_update) . "'\n limit {$offset}, 10000;\n ");
        if (count($log_uuids) == 0) {
            break;
        }
        $offset += 10000;
        $log_uuid_groups = Okapi::make_groups($log_uuids, 100);
        unset($log_uuids);
        foreach ($log_uuid_groups as $log_uuids) {
            self::generate_changelog_entries('services/logs/entries', 'log', 'log_uuids', 'uuid', $log_uuids, self::$logged_log_entry_fields, false, true, 3600);
        }
    }

    if (Settings::get('OC_BRANCH') == 'oc.de') {
        # On OCDE branch, deleted log entries are MOVED to another table.
        # So the above queries won't detect them. We need to run one more.
        # We will assume there are not so many of them and we don't have to
        # split them in groups as we did above.
        $DELETED_uuids = Db::select_column("\n select uuid\n from cache_logs_archived\n where okapi_syncbase > '" . mysql_real_escape_string($last_update) . "'\n ");
        self::generate_changelog_entries('services/logs/entries', 'log', 'log_uuids', 'uuid', $DELETED_uuids, self::$logged_log_entry_fields, false, true, 3600);
    }

    # Update state variables. The next run will only scan rows changed
    # after $now; clog_revision mirrors the newest changelog id.
    Okapi::set_var("last_clog_update", $now);
    $revision = Db::select_value("select max(id) from okapi_clog");
    Okapi::set_var("clog_revision", $revision);
}
/**
 * Bring the TileTree (map) structures up to date with the changelog.
 *
 * Compares the last changelog revision already applied to the tile tree
 * ('clog_followup_revision') with the current changelog revision
 * ('clog_revision') and replays the missing changelog entries. Backlogs
 * of 30000 revisions or more are handled by resetting the tile tree
 * instead of replaying entry by entry.
 */
public function execute()
{
    $current_clog_revision = Okapi::get_var('clog_revision', 0);
    $tiletree_revision = Okapi::get_var('clog_followup_revision', 0);
    if ($tiletree_revision === $current_clog_revision) {
        # No update necessary.
    } elseif ($tiletree_revision < $current_clog_revision) {
        require_once $GLOBALS['rootpath'] . "okapi/services/caches/map/replicate_listener.inc.php";
        if ($current_clog_revision - $tiletree_revision < 30000) {
            # Replay changelog chunks, but stop after ~4 minutes so this
            # cronjob does not run forever; the saved followup revision lets
            # the next invocation continue where we left off.
            for ($timeout = time() + 240; time() < $timeout;) {
                try {
                    $response = OkapiServiceRunner::call(
                        'services/replicate/changelog',
                        new OkapiInternalRequest(
                            new OkapiInternalConsumer(), null,
                            array('since' => $tiletree_revision)
                        )
                    );
                    \okapi\services\caches\map\ReplicateListener::receive($response['changelog']);
                    $tiletree_revision = $response['revision'];
                    Okapi::set_var('clog_followup_revision', $tiletree_revision);
                    if (!$response['more']) {
                        break;
                    }
                } catch (BadRequest $e) {
                    # Invalid 'since' parameter? May happen when crontab was
                    # not working for more than 10 days. Or, just after OKAPI
                    # is installed (and this is the first time this cronjob
                    # is being run). Reset the tile tree and fast-forward to
                    # the current revision.
                    # (Removed a dead "$mail_admins = $tiletree_revision > 0;"
                    # assignment here — the flag was never read.)
                    \okapi\services\caches\map\ReplicateListener::reset();
                    Okapi::set_var('clog_followup_revision', $current_clog_revision);
                    break;
                }
            }
        } else {
            # Some kind of bigger update. Resetting TileTree might be a better option.
            \okapi\services\caches\map\ReplicateListener::reset();
            Okapi::set_var('clog_followup_revision', $current_clog_revision);
        }
    }
}
/**
 * Migration step 41: invalidate the changelog and cronjob state so that
 * both are rebuilt on the next run.
 */
private static function ver41()
{
    # Dropping this variable forces a changelog reset; entries will be
    # regenerated starting one day back.
    Db::execute("delete from okapi_vars where var='last_clog_update'");

    # Schedule every cronjob for immediate re-execution and discard the
    # cached schedule.
    Okapi::set_var("cron_nearest_event", 0);
    Cache::delete('cron_schedule');
}