Example #1
public static function call(OkapiRequest $request)
{
    require_once 'replicate_common.inc.php';

    $result = array();

    # Current changelog boundaries.
    $result['changelog'] = array(
        'min_since' => ReplicateCommon::get_min_since(),
        'revision' => ReplicateCommon::get_revision(),
    );

    # Describe the latest full dump, if one has been generated.
    $dump = Cache::get("last_fulldump");
    if ($dump) {
        $result['latest_fulldump'] = array(
            'revision' => $dump['revision'],
            'generated_at' => $dump['meta']['generated_at'],
            'size' => $dump['meta']['compressed_size'],
            'size_uncompressed' => $dump['meta']['uncompressed_size'],
        );
    } else {
        $result['latest_fulldump'] = null;
    }
    return Okapi::formatted_response($request, $result);
}
Example #2
public static function call(OkapiRequest $request)
{
    require_once 'replicate_common.inc.php';

    # The 'since' parameter is required and must be an integer revision number.
    $since = $request->get_parameter('since');
    if ($since === null) {
        throw new ParamMissing('since');
    }
    if ((int) $since != $since) {
        throw new InvalidParam('since');
    }

    # Let's check the $since parameter.
    if (!ReplicateCommon::check_since_param($since)) {
        throw new BadRequest("The 'since' parameter is too old. You must update your database more frequently.");
    }

    # Select the best chunk for the given $since, then get the chunk from the database (or cache).
    list($from, $to) = ReplicateCommon::select_best_chunk($since);
    $clog_entries = ReplicateCommon::get_chunk($from, $to);

    $result = array(
        'changelog' => &$clog_entries,
        'revision' => $to + 0,
        'more' => $to < ReplicateCommon::get_revision(),
    );
    return Okapi::formatted_response($request, $result);
}
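The response built in Example #2 is meant to be polled repeatedly: the client keeps asking for entries newer than its last known revision until 'more' turns false. A minimal client-side sketch of that loop follows; the endpoint URL and the http_get_json() helper are assumptions for illustration only and are not part of the code above.

# Hypothetical helper: fetch a URL and decode its JSON body.
function http_get_json($url)
{
    return json_decode(file_get_contents($url), true);
}

$since = 123456;  # revision the local copy is currently synchronized to
do {
    $response = http_get_json(
        'https://example.com/okapi/services/replicate/changelog?since=' . $since
    );
    foreach ($response['changelog'] as $entry) {
        # Apply $entry to the local copy of the data here.
    }
    $since = $response['revision'];  # advance the cursor for the next request
} while ($response['more']);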
Example #3
public function execute()
{
    require_once $GLOBALS['rootpath'] . "okapi/services/replicate/replicate_common.inc.php";

    $max_revision = ReplicateCommon::get_revision();

    # Record which revision was current at this moment (one entry per run).
    $cache_key = 'clog_revisions_daily';
    $data = Cache::get($cache_key);
    if ($data == null) {
        $data = array();
    }
    $data[time()] = $max_revision;

    # Entries older than 10 days tell us which changelog rows are safe to delete;
    # the younger entries are kept for the next run.
    $new_min_revision = 1;
    $new_data = array();
    foreach ($data as $time => $r) {
        if ($time < time() - 10 * 86400) {
            $new_min_revision = max($new_min_revision, $r);
        } else {
            $new_data[$time] = $r;
        }
    }

    Db::execute("
        delete from okapi_clog
        where id < '" . mysql_real_escape_string($new_min_revision) . "'
    ");
    Cache::set($cache_key, $new_data, 10 * 86400);
    Db::query("optimize table okapi_clog");
}
Example #4
private static function ver89()
{
    # Ignore newly added replicate fields. This way we will avoid generating
    # changelog entries for these fields.
    require_once $GLOBALS['rootpath'] . "okapi/services/replicate/replicate_common.inc.php";
    $new_geocache_fields = array(
        'attr_acodes', 'willattends', 'country', 'state', 'preview_image',
        'trip_time', 'trip_distance', 'gc_code', 'hints2', 'protection_areas',
    );
    ReplicateCommon::verify_clog_consistency(true, $new_geocache_fields);
}