2 * Copyright (C) 1994, Index Data I/S
4 * Sebastian Hammer, Adam Dickmeiss
7 * Revision 1.2 1994-09-26 17:06:36 quinn
10 * Revision 1.1 1994/09/26 16:07:57 quinn
11 * Most of the functionality in place.
16 * This module handles the representation of tables in the bfiles.
/*
 * Allocate a disk block number for block-type 'type'.
 * If the on-disk freelist is non-empty, pop its head: the freed block's
 * first sizeof(int) bytes hold the next freelist entry, which becomes
 * the new head. Otherwise grow the file by taking the current 'top'
 * block number and incrementing it.
 * NOTE(review): this view of the file is elided — statements (braces,
 * the 'tmp' declaration, the return) are missing between the lines below.
 */
24 static int is_freestore_alloc(ISAM is, int type)
28     if (is->types[type].freelist >= 0)
           /* reuse head of freelist; read its stored next-free link */
30         tmp = is->types[type].freelist;
31         if (bf_read(is->types[type].bf, tmp, 0, sizeof(tmp),
32             &is->types[type].freelist) <=0)
               /* presumably aborts after logging — TODO confirm (tail elided) */
34             log(LOG_FATAL, "Failed to allocate block");
           /* freelist empty: extend the file by one block */
39         tmp = is->types[type].top++;
/*
 * Return disk block 'block' to the freelist for block-type 'type'.
 * Pushes it on the head: the previous freelist head is written into the
 * freed block's first sizeof(int) bytes on disk (forming the chain that
 * is_freestore_alloc reads back), and 'block' becomes the new head.
 */
44 static void is_freestore_free(ISAM is, int type, int block)
48     tmp = is->types[type].freelist;
49     is->types[type].freelist = block;
       /* store old head inside the freed block itself */
50     if (bf_write(is->types[type].bf, block, 0, sizeof(tmp), &tmp) < 0)
52         log(LOG_FATAL, "Failed to deallocate block.");
57 /* this code must be modified to handle an index */
/*
 * Read the first chunk of an UNREAD disk block into a freshly allocated
 * large mbuf, and decode the block header from it.
 * Header layout (visible from the memcpy sequence below): num_records,
 * then nextpos, then — in the table's first block only — the table-wide
 * num_records. buf->offset is advanced past the header so buf->data +
 * buf->offset points at the key data.
 * Leaves block->state CLEAN when every record fit in this one read,
 * PARTIAL otherwise; block->bread counts key-data bytes read so far.
 * NOTE(review): elided view — braces, declarations of 'buf'/'toread',
 * and the return statement are missing between the numbered lines.
 */
58 int is_p_read_partial(is_mtable *tab, is_mblock *block)
63     assert(block->state == IS_MBSTATE_UNREAD);
64     block->data = buf = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
       /* read a whole disk block, clamped to what one large mbuf can hold */
65     toread = tab->is->types[tab->pos_type].blocksize;
66     if (toread > is_mbuf_size[buf->type])
68         toread = is_mbuf_size[buf->type];
           /* buffer smaller than block: more reads will be needed */
69         block->state = IS_MBSTATE_PARTIAL;
72         block->state = IS_MBSTATE_CLEAN;
73     if (bf_read(tab->is->types[tab->pos_type].bf, block->diskpos, 0, toread,
76         log(LOG_FATAL, "bfread failed.");
79     /* extract header info */
81     memcpy(&block->num_records, buf->data, sizeof(block->num_records));
82     buf->offset += sizeof(block->num_records);
83     memcpy(&block->nextpos, buf->data + buf->offset,
84         sizeof(block->nextpos));
85     buf->offset += sizeof(block->nextpos);
86     if (block == tab->data) /* first block */
           /* first block additionally carries the table-wide record count */
88         memcpy(&tab->num_records, buf->data + buf->offset,
89             sizeof(tab->num_records));
90         buf->offset +=sizeof(tab->num_records);
       /* how many whole keys landed in the buffer after the header */
92     buf->num = (toread - buf->offset) / is_keysize(tab->is);
93     if (buf->num >= block->num_records)
           /* everything is in memory: clamp and mark the block clean */
95         buf->num = block->num_records;
96         block->state = IS_MBSTATE_CLEAN;
       /* bytes of key data consumed from disk so far (resume point) */
99     block->bread = buf->num * is_keysize(tab->is);
/*
 * Ensure an entire disk block is present in memory.
 * Performs the initial partial read if the block is still UNREAD, then,
 * while it remains PARTIAL, appends large mbufs to the chain and reads
 * the remaining keys from disk at offset block->bread until all
 * block->num_records records are loaded.
 * NOTE(review): elided view — loop braces, 'buf'/'dread'/'toread'
 * declarations, dread updates and the return are not visible here.
 */
103 int is_p_read_full(is_mtable *tab, is_mblock *block)
108     if (block->state == IS_MBSTATE_UNREAD && is_p_read_partial(tab, block) < 0)
110         log(LOG_FATAL, "partial read failed.");
113     if (block->state == IS_MBSTATE_PARTIAL)
            /* records already in memory from the partial read */
116         dread = block->data->num;
117         while (dread < block->num_records)
                /* grow the mbuf chain and fill the new buffer with keys */
119             buf->next = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
                /* whole keys that fit in one large mbuf */
122             toread = is_mbuf_size[buf->type] / is_keysize(tab->is);
123             if (toread > block->num_records - dread)
124                 toread = block->num_records - dread;
                /* resume reading at byte offset block->bread within the block */
126             if (bf_read(tab->is->types[tab->pos_type].bf, block->diskpos, block->bread, toread *
127                 is_keysize(tab->is), buf->data) < 0)
129                 log(LOG_FATAL, "bfread failed.");
135             block->bread += toread * is_keysize(tab->is);
142 * write dirty blocks to bfile.
143 * Allocate blocks as necessary.
/*
 * Write the table's dirty in-memory blocks back to the bfile.
 * For each block: allocate a disk position if it has none (diskpos < 0),
 * chain nextpos to the following block's (possibly newly allocated)
 * position, serialize the header (num_records, nextpos, and for the
 * first block the table-wide num_records) followed by every mbuf's key
 * data into the staging buffer type->dbuf, then bf_write the lot.
 * NOTE(review): elided view — 'p'/'b'/'sum'/'v' declarations, the
 * dirty-state test, brace structure and sum updates in the mbuf loop
 * are missing between the numbered lines.
 */
145 void is_p_sync(is_mtable *tab)
150     isam_blocktype *type;
152     type = &tab->is->types[tab->pos_type];
153     for (p = tab->data; p; p = p->next)
155         /* make sure that blocks are allocated. */
157         p->diskpos = is_freestore_alloc(tab->is, tab->pos_type);
            /* link this block to its successor, allocating it if needed */
160         if (p->next->diskpos < 0)
161             p->nextpos = p->next->diskpos = is_freestore_alloc(tab->is,
164             p->nextpos = p->next->diskpos;
            /* serialize header into the staging buffer */
167         memcpy(type->dbuf, &p->num_records, sizeof(p->num_records));
168         sum += sizeof(p->num_records);
169         memcpy(type->dbuf + sum, &p->nextpos, sizeof(p->nextpos));
170         sum += sizeof(p->nextpos);
171         if (p == tab->data) /* first block */
                /* first block also carries the table-wide record count */
173             memcpy(type->dbuf + sum, &tab->num_records,
174                 sizeof(tab->num_records));
175             sum += sizeof(tab->num_records);
            /* append key data from every mbuf in the block's chain */
177         for (b = p->data; b; b = b->next)
179             memcpy(type->dbuf + sum, b->data + b->offset, v = b->num *
180                 is_keysize(tab->is));
182         assert(sum <= type->blocksize);
184         if (bf_write(type->bf, p->diskpos, 0, sum, type->dbuf) < 0)
186             log(LOG_FATAL, "Failed to write block.");
193 * Free all disk blocks associated with table.
/*
 * Release every disk block associated with the table back to the
 * freestore (freelist). In-memory state is presumably handled by the
 * elided lines — TODO confirm; only the per-block free call is visible.
 */
195 void is_p_unmap(is_mtable *tab)
199     for (p = tab->data; p; p = p->next)
202         is_freestore_free(tab->is, tab->pos_type, p->diskpos);
/*
 * Detach up to *num keys from the head of the mbuf chain *mb and return
 * them as a new chain. Whole mbufs are taken while they fit; if a split
 * falls inside an mbuf, a small mbuf is created that aliases the same
 * data buffer with offset/num adjusted to cover only the remainder,
 * leaving the original mbuf holding the taken prefix.
 * NOTE(review): elided view — 'toget' initialization, the whole-mbuf
 * transfer inside the while loop, *num/*mb updates and the return are
 * not visible here.
 */
207 static is_mbuf *mbuf_takehead(is_mbuf **mb, int *num, int keysize)
209     is_mbuf *p = 0, **pp = &p, *new;
        /* take whole mbufs from the head while they fit in the request */
212     while (*mb && toget >= (*mb)->num)
        /* request ends mid-mbuf: split it with an aliasing small mbuf */
220     if (toget > 0 && *mb)
222         new = xmalloc_mbuf(IS_MBUF_TYPE_SMALL);
223         new->next = (*mb)->next;
            /* alias the same data buffer; only offset/num differ */
225         new->data = (*mb)->data;
227         new->offset = (*mb)->offset + toget * keysize;
228         new->num = (*mb)->num - toget;
240 * Split up individual blocks which have grown too large.
241 * is_p_align and is_p_remap are alternative functions which trade off
242 * speed in updating versus optimum usage of disk blocks.
/*
 * Split up individual in-memory blocks which have grown past the
 * per-block key limit (max_keys_block0 for the first block,
 * max_keys_block otherwise). An oversized dirty block's mbuf chain is
 * carved into pieces of roughly nice_keys_block records each via
 * mbuf_takehead, and each piece after the first is placed in a fresh
 * dirty mblock linked after the current one.
 * Cheaper than is_p_remap but less optimal in disk-block usage.
 * NOTE(review): elided view — the blocks++ rounding, loop braces, and
 * the wiring of each new mblock's data pointer are not visible here.
 */
244 void is_p_align(is_mtable *tab)
246     is_mblock *mblock, *new;
247     is_mbuf *mbufs, *mbp;
248     int blocks, recsblock;
250     log(LOG_DEBUG, "Realigning table.");
251     for (mblock = tab->data; mblock; mblock = mblock->next)
            /* only split dirty blocks that exceed their key limit */
253         if (mblock->state == IS_MBSTATE_DIRTY && mblock->num_records >
254             (mblock == tab->data ?
255             tab->is->types[tab->pos_type].max_keys_block0 :
256             tab->is->types[tab->pos_type].max_keys_block))
                /* target block count at the 'nice' fill level */
258             blocks = tab->num_records /
259                 tab->is->types[tab->pos_type].nice_keys_block;
260             if (tab->num_records %
261                 tab->is->types[tab->pos_type].nice_keys_block)
                /* records per resulting block */
263             recsblock = tab->num_records / blocks;
266             mbufs = mblock->data;
                /* peel off one block's worth of keys at a time */
267             while ((mbp = mbuf_takehead(&mbufs, &recsblock,
268                 is_keysize(tab->is))))
                    /* new dirty block spliced in after the current one */
270                 new = xmalloc_mblock();
272                 new->state = IS_MBSTATE_DIRTY;
273                 new->next = mblock->next;
276             mblock->num_records = recsblock;
277             mblock = mblock->next;
284 * Reorganize data in blocks for minimum block usage and quick access.
285 * Free surplus blocks.
286 * is_p_align and is_p_remap are alternative functions which trade off
287 * speed in updating versus optimum usage of disk blocks.
/*
 * Reorganize the whole table for minimum block usage and quick access:
 * concatenate every block's mbuf chain into one list, compute the
 * target block count from nice_keys_block, then rebuild tab->data as a
 * fresh chain of dirty mblocks (diskpos = -1, i.e. not yet allocated on
 * disk), each taking ~recsblock keys via mbuf_takehead.
 * More thorough than is_p_align, at higher update cost.
 * NOTE(review): the definition continues past the end of this view, and
 * intermediate lines (bufpp init, blocks++ rounding, loop braces,
 * freeing of old mblocks) are elided; comments cover only what is
 * visible.
 */
289 void is_p_remap(is_mtable *tab)
291     is_mbuf *mbufs, **bufpp, *mbp;
292     is_mblock *blockp, **blockpp;
293     int recsblock, blocks;
295     log(LOG_DEBUG, "Remapping table.");
296     /* collect all data */
298     for (blockp = tab->data; blockp; blockp = blockp->next)
            /* splice each block's mbuf chain onto the collection list */
300         *bufpp = blockp->data;
302         bufpp = &(*bufpp)->next;
        /* target block count and records per block at the 'nice' level */
305     blocks = tab->num_records / tab->is->types[tab->pos_type].nice_keys_block;
306     if (tab->num_records % tab->is->types[tab->pos_type].nice_keys_block)
308     recsblock = tab->num_records / blocks;
        /* rebuild the block chain from scratch */
311     blockpp = &tab->data;
312     while ((mbp = mbuf_takehead(&mbufs, &recsblock, is_keysize(tab->is))))
316         *blockpp = xmalloc_mblock();
            /* -1 marks "no disk block yet" — is_p_sync will allocate one */
317         (*blockpp)->diskpos = -1;
319         (*blockpp)->data = mbp;
320         (*blockpp)->num_records = recsblock;
321         (*blockpp)->state = IS_MBSTATE_DIRTY;
322         blockpp = &(*blockpp)->next;