
Python: ctypes hashable c_char array replacement without tripping over '\0' bytes

For illustration purposes, this script creates a file named mapfile containing the contents of the files given as arguments, each prepended by a binary header with a sha1 checksum, which allows duplicates to be detected on subsequent runs.

What's needed here is a hashable replacement for ctypes.c_char arrays that can hold sha1 checksums with minimal fuss, without choking on '\0' bytes.

# -*- coding: utf-8 -*-

import io
import mmap
import ctypes
import hashlib
import logging

from collections import OrderedDict

log = logging.getLogger(__file__)

def align(size, alignment):
    """Return size rounded up to the next multiple of alignment."""
    excess = size % alignment
    if excess:
        size = size - excess + alignment
    return size


class Header(ctypes.Structure):
    Identifier = b'HEAD'
    _fields_ = [
        ('id', ctypes.c_char * 4),      # magic identifier: b'HEAD'
        ('hlen', ctypes.c_uint16),      # header size in bytes
        ('plen', ctypes.c_uint32),      # payload size in bytes
        ('name', ctypes.c_char * 128),  # file name
        ('sha1', ctypes.c_char * 20),   # sha1 digest of the payload
    ]
HeaderSize = ctypes.sizeof(Header)

class CtsMap:
    """Context manager mapping a ctypes type into an mmap at a given offset."""
    def __init__(self, ctcls, mm, offset = 0):
        self.ctcls = ctcls
        self.mm = mm
        self.offset = offset

    def __enter__(self):
        mm = self.mm
        offset = self.offset
        ctsize = ctypes.sizeof(self.ctcls)
        if offset + ctsize > mm.size():
            newsize = align(offset + ctsize, mmap.PAGESIZE)
            mm.resize(newsize)
        self.ctinst = self.ctcls.from_buffer(mm, offset)
        return self.ctinst

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # drop the buffer export; an exported buffer would block mm.resize()
        del self.ctinst
        self.ctinst = None

class MapFile:
    def __init__(self, filename):
        try:
            # try to create initial file
            mapsize = mmap.PAGESIZE
            self._fd = open(filename, 'x+b')
            self._fd.write(b'\0' * mapsize)
        except FileExistsError:
            # file exists and is writable
            self._fd = open(filename, 'r+b')
            self._fd.seek(0, io.SEEK_END)
            mapsize = self._fd.tell()
        # mmap this file completely
        self._fd.seek(0)
        self._mm = mmap.mmap(self._fd.fileno(), mapsize)
        self._offset = 0
        self._toc = OrderedDict()
        self.gen_toc()

    def gen_toc(self):
        while self._offset < self._mm.size():
            with CtsMap(Header, self._mm, self._offset) as hd:
                if hd.id == Header.Identifier and hd.hlen == HeaderSize:
                    self._toc[hd.sha1] = self._offset
                    log.debug('toc: [%s]%s: %s', len(hd.sha1), hd.sha1, self._offset)
                    self._offset += HeaderSize + hd.plen
                else:
                    break
            del hd  # release the last reference to the mapped structure

    def add_data(self, datafile, data):
        datasize = len(data)
        sha1 = hashlib.sha1()
        sha1.update(data)
        digest = sha1.digest()

        if digest in self._toc:
            log.debug('add_data: %s added already', digest)
            return None

        log.debug('add_data: %s, %s bytes, %s', datafile, datasize, digest)
        with CtsMap(Header, self._mm, self._offset) as hd:
            hd.id = Header.Identifier
            hd.hlen = HeaderSize
            hd.plen = datasize
            hd.name = datafile
            hd.sha1 = digest
        del hd
        self._offset += HeaderSize

        log.debug('add_data: %s', datasize)
        blktype = ctypes.c_char * datasize
        with CtsMap(blktype, self._mm, self._offset) as blk:
            blk.raw = data
        del blk
        self._offset += datasize
        return HeaderSize + datasize

    def close(self):
        self._mm.close()
        self._fd.close()


if __name__ == '__main__':
    import os
    import sys

    logconfig = dict(
        level = logging.DEBUG,
        format = '%(levelname)5s: %(message)s',
    )
    logging.basicConfig(**logconfig)

    mf = MapFile('mapfile')
    for datafile in sys.argv[1:]:
        if os.path.isfile(datafile):
            try:
                with open(datafile, 'rb') as f:
                    data = f.read()
            except OSError:
                continue
            mf.add_data(datafile.encode('utf-8'), data)
    mf.close()

Run: python3 hashable_ctypes_bytes.py somefiles*

Invoking it a second time, it reads through the file, collecting all items in an ordered dict, using the sha1 digest as key. Unfortunately, the c_char array semantics are a little weird: such a field also behaves like a '\0'-terminated C string, so checksums containing a '\0' byte come back truncated (a minimal standalone reproduction follows the log below).

See lines 3 and 4:

DEBUG: toc: [20]b'\xcd0\xd7\xd3\xbf\x9f\xe1\xfe\xffr\xa6g#\xee\xf8\x84\xb5S,u': 0
DEBUG: toc: [20]b'\xe9\xfe\x1a;i\xcdG0\x84\x1b\r\x7f\xf9\x14\x868\xbdVl\x8d': 1273
DEBUG: toc: [19]b'\xa2\xdb\xff$&\xfe\x0f\xb4\xcaB<F\x92\xc0\xf1`(\x96N': 3642
DEBUG: toc: [15]b'O\x1b~c\x82\xeb)\x8f\xb5\x9c\x15\xd5e:\xa9': 4650
DEBUG: toc: [20]b'\x80\xe9\xbcF\x97\xdc\x93DG\x90\x19\x8c\xca\xfep\x05\xbdM\xfby': 13841
DEBUG: add_data: b'\xcd0\xd7\xd3\xbf\x9f\xe1\xfe\xffr\xa6g#\xee\xf8\x84\xb5S,u' added already
DEBUG: add_data: b'\xe9\xfe\x1a;i\xcdG0\x84\x1b\r\x7f\xf9\x14\x868\xbdVl\x8d' added already
DEBUG: add_data: b'../python/tmp/colorselect.py', 848 bytes, b'\xa2\xdb\xff$&\xfe\x0f\xb4\xcaB<F\x92\xc0\xf1`(\x96N\x00'
DEBUG: add_data: 848
DEBUG: add_data: b'../python/tmp/DemoCode.py', 9031 bytes, b'O\x1b~c\x82\xeb)\x8f\xb5\x9c\x15\xd5e:\xa9\x00p\x0f\xc04'
DEBUG: add_data: 9031
DEBUG: add_data: b'\x80\xe9\xbcF\x97\xdc\x93DG\x90\x19\x8c\xca\xfep\x05\xbdM\xfby' added already
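
The truncation is easy to reproduce outside the mapfile machinery. A minimal sketch (the Rec structure is purely illustrative):

import ctypes

class Rec(ctypes.Structure):
    _fields_ = [('sha1', ctypes.c_char * 20)]

rec = Rec()
rec.sha1 = b'\xa2' * 10 + b'\x00' + b'\xa2' * 9  # 20 bytes with an embedded '\0'

print(len(rec.sha1))  # 10 -- the attribute read stops at the first '\0'
# the remaining bytes are still in the buffer; only the getter truncates:
print(ctypes.string_at(ctypes.addressof(rec) + Rec.sha1.offset, 20))  # all 20 bytes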

The usual suggestion is to replace the c_char * 20 with c_byte * 20, losing the transparent bytes handling along the way. Apart from the data-conversion hassles, c_byte arrays aren't hashable, since they behave like bytearrays. I haven't found a practical solution that avoids heavy conversions back and forth, short of resorting to hexdigests, which doubles the storage needed for each sha1 digest.
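
To make the trade-off concrete, here is a quick sketch of both points, using c_ubyte as the unsigned variant:

import ctypes
import hashlib

digest = hashlib.sha1(b'aaaaaaaaaaa').digest()
arr = (ctypes.c_ubyte * 20)(*digest)

try:
    {arr: 0}
except TypeError as exc:
    print(exc)  # ctypes arrays are mutable buffers, hence unhashable

key = bytes(arr)  # the conversion copies, but keeps all 20 bytes
print(key == digest)  # True, embedded '\0' bytes included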

I think the design decision to mix c_char with C zero-termination semantics was a mistake in the first place. To cope with it, I could imagine adding a c_char_nz type to ctypes that resolves this issue.

Those of you who read the code carefully might wonder about the del statements for the ctypes structures: a structure created with from_buffer holds a buffer export on the mmap, which has to be released before the mmap can be resized or closed. A discussion of this can be found here: .

While the code below does the back-and-forth conversion you mentioned, it hides the issue nicely. I found a hash that contains a NUL byte, and the field can now be used as a dictionary key. Hope it helps.

from ctypes import *
import hashlib

class Test(Structure):
    # store the digest as raw unsigned bytes, free of C string semantics
    _fields_ = [('_sha1', c_ubyte * 20)]

    @property
    def sha1(self):
        # copy out as immutable (hashable) bytes; no '\0' truncation
        return bytes(self._sha1)

    @sha1.setter
    def sha1(self, value):
        # copy the digest into the underlying c_ubyte array
        self._sha1 = (c_ubyte * 20)(*value)

test = Test()
test.sha1 = hashlib.sha1(b'aaaaaaaaaaa').digest()
D = {test.sha1:0}
print(D)

Output:

{b'u\\\x00\x1fJ\xe3\xc8\x84>ZP\xddj\xa2\xfa#\x89=\xd3\xad': 0}
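
For completeness, the same property pair can be grafted onto the Header structure from the question (a sketch; the rest of the script stays unchanged, and gen_toc and add_data keep reading and writing hd.sha1 as before). The on-disk layout is identical, since c_ubyte * 20 has the same size and alignment as c_char * 20:

import ctypes

class Header(ctypes.Structure):
    Identifier = b'HEAD'
    _fields_ = [
        ('id', ctypes.c_char * 4),
        ('hlen', ctypes.c_uint16),
        ('plen', ctypes.c_uint32),
        ('name', ctypes.c_char * 128),
        ('_sha1', ctypes.c_ubyte * 20),  # raw bytes, no '\0' truncation
    ]

    @property
    def sha1(self):
        return bytes(self._sha1)  # immutable, hashable, all 20 bytes

    @sha1.setter
    def sha1(self, value):
        self._sha1 = (ctypes.c_ubyte * 20)(*value)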
