/**
 * Updates every row matching $crit with the column values in $new_data.
 *
 * Flow: lock the table, select the target rows together with their physical
 * offsets, verify the update cannot produce duplicate primary/unique keys,
 * rewrite the affected index files (.pri primary, .btr b-tree, .idx list),
 * then rewrite the row data in-place in the .dat file — splitting a row into
 * chained chunks (ROW_SPLIT / ROW_CONTINUE) when the new data no longer fits.
 *
 * BUGFIX: the body previously used bare `break;` statements for early exits
 * directly inside the try block; `break` outside a loop/switch is a fatal
 * error in PHP 7+ (the code was evidently converted from a do{...}while(0)
 * idiom). A `do { ... } while (false);` wrapper is reinstated inside the try
 * so those early exits fall through to the unlock/return path as originally
 * intended.
 *
 * @param string $name     Table name.
 * @param array  $crit     select() criteria; 'col', 'offsets', 'explain' and
 *                         'rtrim_strings' entries are overwritten here.
 * @param array  $new_data Map of column name => new value.
 *
 * @return array|false The (updated) result set; false if the table could not
 *                     be locked or the underlying select() failed.
 * @throws Exception On duplicate keys or corrupt index/data files. The table
 *                   lock is always released before the exception propagates.
 */
public function update($name, $crit, $new_data)
{
    if (!$this->lock_table($name, true)) {
        return false;
    }
    $str_res = $this->locked_tables_list[$name];
    // Pulls $fields, $params, $meta, $aname (auto-increment/PK column name),
    // $acnt, $unique, $index, $str_fp, ... into local scope from the
    // locked-table descriptor.
    extract($str_res);
    $crit['col'] = '*';             // need full rows to rebuild their binary images
    $crit['offsets'] = true;        // physical offsets are required for in-place writes
    $crit['explain'] = false;
    $crit['rtrim_strings'] = false; // keep padding so stored lengths stay comparable
    $success = false;
    $this->I->meta = $meta;
    try {
        do { // single-pass loop: `break` = early exit to unlock/return below
            $res = $this->select($name, $crit, $str_res);
            if (isset($new_data[$aname]) && sizeof($res) > 1) {
                throw new Exception('You cannot set new PRIMARY KEY value for more than one row at once.');
            }
            if ($res === false) {
                break; // select failed; fall through and return false
            }

            /* PHASE 0: Check that row update will not cause "Duplicate key" errors */
            $need_unique_update = sizeof($unique) && sizeof(array_intersect($unique, array_keys($new_data)));
            if ($need_unique_update && ($ufp = fopen_cached($this->dir . '/' . $name . '.btr', 'r+b'))) {
                foreach ($unique as $unique_name) {
                    if (!isset($new_data[$unique_name])) {
                        continue;
                    }
                    /* A conflict will be caused if:
                       - the row is updated from the old value to a value that already exists
                       - the update tries to set the same value for more than one row */
                    $srch = $this->I->search_unique($ufp, $new_data, $fields, $unique_name);
                    if (sizeof($res) > 1 || sizeof($res) == 1 && $res[0][$unique_name] != $new_data[$unique_name] && $srch !== false) {
                        throw new Exception('Duplicate key ' . $new_data[$unique_name] . ' for ' . $unique_name);
                    }
                }
            } else {
                if ($need_unique_update && !$ufp) {
                    throw new Exception('Unique index file corrupt.');
                }
            }

            /* PHASE 1: Check the primary index & insert the new value if needed.
               The .pri file is a flat array of 4-byte offsets indexed by PK value;
               -1 marks a free slot. */
            if (isset($new_data[$aname]) && ($pfp = fopen_cached($this->dir . '/' . $name . '.pri', 'r+b'))) {
                $succ = true; // NOTE(review): never set to false below, so the early exit after the loop is currently dead
                foreach ($res as $data) {
                    if (isset($new_data[$aname]) && $new_data[$aname] < $acnt && $new_data[$aname] != $data[$aname]) {
                        $cnt = $new_data[$aname];
                        fseek($pfp, 4 * $cnt);
                        list(, $offset) = unpack('l', fread($pfp, 4));
                        if ($offset < 0) {
                            // Target slot is free: move the offset from the old PK slot
                            // to the new one and mark the old slot as free.
                            fseek($pfp, 4 * $data[$aname], SEEK_SET);
                            list(, $newoff) = unpack('l', fread($pfp, 4));
                            fseek($pfp, -4, SEEK_CUR);
                            fputs($pfp, pack('l', -1));
                            fseek($pfp, 4 * $cnt, SEEK_SET);
                            fputs($pfp, pack('l', $newoff));
                        } else {
                            throw new Exception('Duplicate primary key value');
                        }
                    }
                }
                if (!$succ) {
                    break; // either a problem with delete_primary; it should already have set an error
                }
            } else {
                if (isset($new_data[$aname])) {
                    throw new Exception('Primary index file corrupt.');
                }
            }

            /* PHASE 2: Insert unique index values (it is safe to do now) */
            if ($need_unique_update) {
                foreach ($unique as $unique_name) {
                    if (!isset($new_data[$unique_name])) {
                        continue;
                    }
                    foreach ($res as $data) {
                        if ($data[$unique_name] == $new_data[$unique_name]) {
                            continue; // value unchanged, nothing to reindex
                        }
                        $this->I->delete_unique($ufp, $data, $fields, $unique_name);
                        $this->I->insert_unique($ufp, $new_data, $fields, $unique_name, $data['__offset']);
                    }
                }
            }

            /* PHASE 3: Insert (non-unique) index values */
            $need_index_update = sizeof($index) && sizeof(array_intersect($index, array_keys($new_data)));
            if ($need_index_update && ($ifpi = fopen_cached($this->dir . '/' . $name . '.idx', 'r+b')) && ($ifp = fopen_cached($this->dir . '/' . $name . '.btr', 'r+b'))) {
                foreach ($index as $index_name) {
                    if (!isset($new_data[$index_name])) {
                        continue;
                    }
                    foreach ($res as $data) {
                        if ($data[$index_name] == $new_data[$index_name]) {
                            continue; // no need to update it :))
                        }
                        $this->I->delete_index($ifp, $ifpi, $data, $fields, $index_name, $data['__offset']);
                        $this->I->insert_index($ifp, $ifpi, $new_data, $fields, $index_name, $data['__offset']);
                    }
                }
            } else {
                if ($need_index_update && (!$ifpi || !$ifp)) {
                    throw new Exception('Index file corrupt.');
                }
            }

            /* LAST PHASE: Insert the data itself */
            if ($fp = fopen_cached($this->dir . '/' . $name . '.dat', 'r+b')) {
                foreach ($res as $data_key => $data) {
                    $off = $data['__offset'];
                    fseek($fp, $off, SEEK_SET);
                    $ins = ''; // binary image of the updated row
                    $need_row_split = false;
                    foreach ($fields as $k => $v) {
                        @($od = $data[$k]); // old value (may be absent; suppression kept from original)
                        $d = isset($new_data[$k]) ? $new_data[$k] : false; // already checked for correctness of the operation
                        switch ($v) {
                            case 'BYTE':
                                $ins .= pack('c', $d !== false ? $d : $od);
                                break;
                            case 'INT':
                                $ins .= pack('l', $d !== false ? $d : $od);
                                break;
                            case 'TINYTEXT':
                            case 'TEXT':
                            case 'LONGTEXT':
                                // Length prefix width depends on the text type.
                                $length = 'C';
                                if ($v == 'TEXT') {
                                    $length = 'S';
                                }
                                if ($v == 'LONGTEXT') {
                                    $length = 'l';
                                }
                                if ($d === false) {
                                    $ins .= pack($length, strlen($od));
                                    $ins .= $od;
                                } else {
                                    // Clamp the new value to the type's maximum length.
                                    if ($length == 'C' && strlen($d) > 255) {
                                        $d = substr($d, 0, 255);
                                    } else {
                                        if ($length == 'S' && strlen($d) > 65535) {
                                            $d = substr($d, 0, 65535);
                                        } else {
                                            if ($length == 'l' && strlen($d) > self::MAX_LEN) {
                                                $d = substr($d, 0, self::MAX_LEN);
                                            }
                                        }
                                    }
                                    if (strlen($d) > strlen($od)) {
                                        $need_row_split = true; // new data no longer fits in place
                                    }
                                    if (strlen($d) < strlen($od)) {
                                        // should not write less, as it can lead to row corruption
                                        $d .= str_repeat(' ', strlen($od) - strlen($d));
                                    }
                                    $ins .= pack($length, strlen($d));
                                    $ins .= $d;
                                }
                                break;
                            case 'DOUBLE':
                                $ins .= pack('d', $d !== false ? $d : $od);
                                break;
                        }
                        if ($d !== false) {
                            $res[$data_key][$k] = $d; // reflect the new value in the returned set
                        }
                    }
                    // First byte at the row offset is the row-state marker.
                    list(, $n) = unpack('c', fgetc($fp));
                    if ($n == self::ROW_NORMAL) {
                        if (!$need_row_split) {
                            fwrite($fp, $ins, strlen($ins)); // 0 bytes written means an error too
                        } else {
                            // Need to split the row. First, determine the initial length.
                            fseek($fp, -1, SEEK_CUR);
                            $old_pos = ftell($fp);
                            $this->read_row($fields, $fp, $rtrim_strings = false);
                            $length = ftell($fp) - $old_pos - 1; // 1 row-state byte is not counted
                            // We do not want to split rows often, so leave some spare space
                            // (either 32 bytes or the excess size, whichever is larger).
                            // The data area of the original row shrinks by 8 bytes because
                            // OFFSET_NEXT_PART and ROW_LENGTH are written before the data.
                            $add_length = strlen($ins) - $length + 8;
                            $spare_length = max(32, strlen($ins) - $length);
                            // First, write the tail chunk at the end of the file.
                            fseek($fp, 0, SEEK_END);
                            $next_off = ftell($fp);
                            // write TYPE, OFFSET_OF_NEXT_PART = -1, chunk length
                            fwrite($fp, pack('cll', self::ROW_CONTINUE, -1, $add_length + $spare_length));
                            fwrite($fp, substr($ins, $length - 8));
                            fwrite($fp, str_repeat(pack('x'), $spare_length));
                            // Tail written; now rewrite the original row as the head chunk.
                            fseek($fp, $old_pos, SEEK_SET);
                            fwrite($fp, pack('cll', self::ROW_SPLIT, $next_off, $length - 8));
                            fwrite($fp, substr($ins, 0, $length - 8));
                        }
                    } else {
                        if ($n == self::ROW_SPLIT) {
                            // Row is already split: walk the chunk chain to learn the total
                            // capacity, then decide whether yet another chunk is required.
                            fseek($fp, -1, SEEK_CUR);
                            $old_off = ftell($fp);
                            fseek($fp, $old_off, SEEK_SET);
                            $chunks = array(); // ROW_OFFSET => ROW_LENGTH
                            $offset = $old_off;
                            while (true) {
                                list(, $n) = unpack('c', fgetc($fp));
                                list(, $next_offset, $row_length) = unpack('l2', fread($fp, 8));
                                $chunks[$offset] = $row_length;
                                if ($n != self::ROW_SPLIT && $n != self::ROW_CONTINUE) {
                                    throw new Exception('Data file corrupt (invalid beginning of split row, expected ' . self::ROW_SPLIT . ' or ' . self::ROW_CONTINUE . ', got ' . $n . '). Please run repair table tools if present.');
                                }
                                // NOTE(review): $offset < 0 looks like it was meant to be
                                // $next_offset < 0; the chain still terminates because
                                // fseek() to a negative offset fails. Kept as-is.
                                if ($offset < 0 || fseek($fp, $next_offset) < 0) {
                                    break;
                                }
                                $offset = $next_offset;
                            }
                            $chunks_offs = array_keys($chunks);
                            $chunks_len = array_sum($chunks);
                            if (strlen($ins) <= $chunks_len) {
                                $need_row_split = false;
                            } else {
                                $need_row_split = true;
                            }
                            if ($need_row_split) {
                                // Add one more chunk at the end of the file, again with spare space.
                                fseek($fp, 0, SEEK_END);
                                $offset = ftell($fp);
                                $chunks[$offset] = strlen($ins) - $chunks_len + max(32, strlen($ins) - $chunks_len);
                                $chunks_len += $chunks[$offset];
                                $chunks_offs[] = $offset;
                            }
                            // Right-pad the row image with zero bytes so substr() can be
                            // used to slice data for every chunk.
                            if (strlen($ins) < $chunks_len) {
                                $ins .= str_repeat(pack('x'), $chunks_len - strlen($ins));
                            }
                            $spl = true;  // first chunk is ROW_SPLIT, the rest ROW_CONTINUE
                            $str_j = 0;   // current position in the data string
                            $str_i = 0;   // next chunk index
                            foreach ($chunks as $offset => $row_length) {
                                fseek($fp, $offset, SEEK_SET);
                                $next_off = -1;
                                $str_i++;
                                if (isset($chunks_offs[$str_i])) {
                                    $next_off = $chunks_offs[$str_i];
                                }
                                fwrite($fp, pack('cll', $spl ? self::ROW_SPLIT : self::ROW_CONTINUE, $next_off, $row_length));
                                $spl = false;
                                fwrite($fp, substr($ins, $str_j, $row_length));
                                $str_j += $row_length;
                            }
                        } else {
                            throw new Exception('Data file error (unknown row type).');
                        }
                    }
                }
            } else {
                throw new Exception('Data table corrupt');
            }
            $success = true;
        } while (false);
    } catch (Exception $e) {
        $this->unlock_table($name);
        throw $e;
    }
    // Persist table metadata if the index layer changed it during the update.
    if ($this->I->meta !== $meta) {
        $meta = $this->I->meta;
        rewind($str_fp);
        ftruncate($str_fp, 0);
        fwrite($str_fp, serialize(array($fields, $params, $meta)));
        fflush($str_fp);
    }
    $this->unlock_table($name);
    return $res;
}
/**
 * Opens a table for exact-match lookups through one of its indexes and
 * returns a cursor-like descriptor tuple for subsequent fetch calls.
 *
 * Determines which index covers $col (primary, non-unique or unique), opens
 * the corresponding index file(s) via fopen_cached(), and leaves the table
 * locked on success. On any failure inside the try block the lock is
 * released and the exception is rethrown.
 *
 * BUGFIX: the "no index" error message previously ended with a stray '"'
 * after the closing backtick; the unreachable `break` after the `throw` in
 * the switch default was also removed.
 *
 * @param string $name    Table name.
 * @param mixed  $columns Columns to fetch (validated by checkColumns()).
 * @param string $col     Column the exact-match search runs on; must be
 *                        covered by an index.
 * @param mixed  $value   Value to match against.
 *
 * @return array|false Descriptor: array($name, $columns, $col, $value,
 *                     $meta, $type, $pointers, $fp, $uniqid, $fields);
 *                     false if the table could not be locked.
 * @throws Exception If no index covers $col or an index/data file is corrupt.
 */
function openTable_Index_ExactMatch($name, $columns, $col, $value)
{
    static $uniqid = 0; // per-process counter to tell open cursors apart
    if (!$this->lock_table($name)) {
        return false;
    }
    $type = false;     // one of self::INDEX_PRIMARY, self::INDEX_INDEX, self::INDEX_UNIQUE
    $pointers = false; // a single file pointer, or an ordered pair of pointers for list indexes
    try {
        if (!($fp = fopen_cached($this->dir . '/' . $name . '.dat', 'r+b'))) {
            throw new Exception('Data file corrupt.');
        }
        // Brings $fields, $meta, $aname, $index, $unique, ... into scope.
        extract($this->locked_tables_list[$name]);
        if ($col == $aname) {
            $type = self::INDEX_PRIMARY;
        } elseif (in_array($col, $index)) {
            $type = self::INDEX_INDEX;
        } elseif (in_array($col, $unique)) {
            $type = self::INDEX_UNIQUE;
        } else {
            throw new Exception('No index for column `' . $col . '`');
        }
        $columns = $this->checkColumns($fields, $columns);
        switch ($type) {
            case self::INDEX_PRIMARY:
                $pfp = fopen_cached($this->dir . '/' . $name . '.pri', 'r+b');
                if (!$pfp) {
                    throw new Exception('Primary index corrupt');
                }
                $pointers = $pfp;
                break;
            case self::INDEX_UNIQUE:
                $ufp = fopen_cached($this->dir . '/' . $name . '.btr', 'r+b');
                if (!$ufp) {
                    throw new Exception('B-Tree index corrupt');
                }
                $pointers = $ufp;
                break;
            case self::INDEX_INDEX:
                $ifp = fopen_cached($this->dir . '/' . $name . '.btr', 'r+b');
                $ifpi = fopen_cached($this->dir . '/' . $name . '.idx', 'r+b');
                if (!$ifp || !$ifpi) {
                    throw new Exception('Either B-Tree or List index is corrupt');
                }
                $pointers = array($ifp, $ifpi);
                break;
            default:
                throw new Exception('Unknown index type (this error must never happen)');
        }
    } catch (Exception $e) {
        // Nothing to close here: descriptors are cached (and not cached on
        // open failure), so only the lock needs to be released.
        $this->unlock_table($name);
        throw $e;
    }
    $uniqid++;
    return array($name, $columns, $col, $value, $meta, $type, $pointers, $fp, $uniqid, $fields);
}