import glob

import h5py
import numpy as np
from astropy.io import fits

outname = "test.hdf5"
NAXIS1 = 3800
NAXIS2 = 3800
NAXIS3 = 1500

# Pre-allocate the empty data cube on disc.
f = h5py.File(outname, 'w')
dataset = f.create_dataset("DataCube",
                           (NAXIS1, NAXIS2, NAXIS3),
                           dtype=np.float32)
f.close()
But I ran into trouble when trying to write the arrays from the FITS files, because each iteration of the following for loop takes at least 30 minutes:
f = h5py.File(outname, 'r+')
# This is the actual line, but I will replace it with random noise
# in order to make the example reproducible.
# fitslist = glob.glob("*fits")  # They are 1500 fits files
for i in range(NAXIS3):
    # Again, I replace the real data with noise.
    # hdul = fits.open(fitslist[i])
    # f['DataCube'][:,:,i] = hdul[0].data
    data = np.random.normal(0, 1, (NAXIS1, NAXIS2))
    f['DataCube'][:, :, i] = data
f.close()
Is there a better way to build a 3D data cube out of N slices that are already stored in N FITS files? I expected that once the HDF5 file had been created on disc, writing to it would be very fast, but that is not the case.

Thanks a lot for your help.
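For reference, my understanding of why the loop above is so slow: with the default contiguous, C-ordered layout, a [:, :, i] slice touches one strided element in every row of the cube, so each write scatters across the whole ~80 GB file. Below is a minimal sketch of the kind of layout change that should avoid this, assuming the contiguous layout really is the culprit; the chunks shape is my own guess, not something I have benchmarked:

import h5py
import numpy as np

NAXIS1, NAXIS2, NAXIS3 = 3800, 3800, 1500

with h5py.File("test_chunked.hdf5", "w") as f:
    # One chunk per slice (~58 MB each): a [:, :, i] write then
    # fills exactly one chunk instead of striding over the cube.
    dset = f.create_dataset("DataCube",
                            (NAXIS1, NAXIS2, NAXIS3),
                            dtype=np.float32,
                            chunks=(NAXIS1, NAXIS2, 1))
    for i in range(NAXIS3):
        # Noise stands in for the real FITS data, as above.
        dset[:, :, i] = np.random.normal(0, 1, (NAXIS1, NAXIS2))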
EDIT 1: I tested the modification proposed by astrofrog and it works very well; performance is now quite good. In addition, to reduce the number of writes to the hdf5 file, I now buffer several FITS files (~50) in a temporary numpy array before each write. The code now looks like this:
import gc
import glob

import h5py_cache
import numpy as np
from astropy.io import fits
from tqdm import tqdm

fitslist = glob.glob("*fits")   # the 1500 fits files
ext = 0                         # extension that holds the image data
fits_0 = fits.open(fitslist[0])

NAXIS1 = len(fitslist)
NAXIS2 = fits_0[ext].header['NAXIS1']
NAXIS3 = fits_0[ext].header['NAXIS2']
shape_array = (NAXIS2, NAXIS3)
print(shape_array)

f = h5py_cache.File(outname, 'w', chunk_cache_mem_size=3000*1024**2,
                    libver='latest')
dataset = f.create_dataset("/x", (NAXIS1, NAXIS2, NAXIS3),
                           dtype=np.float32)

# Buffer cache_size slices in memory and flush them in one write.
# Assumes len(fitslist) is a multiple of cache_size (1500 / 50 = 30).
cache_size = 50
cache_array = np.empty(shape=(cache_size, NAXIS2, NAXIS3))
j = 0
for i in tqdm(range(len(fitslist))):
    print(fitslist[i])
    hdul = fits.getdata(fitslist[i], ext)
    cache_array[j, :, :] = hdul
    j += 1
    if j == cache_size:
        print("Writing to disc")
        f['/x'][i - cache_size + 1:i + 1, :, :] = cache_array
        j = 0
    if i % 100 == 0:
        print("collecting garbage")
        gc.collect()
f.close()
My question is: is there a more Pythonic way of doing this? I am not sure this is the most efficient way to write files with h5py, or whether there is a better way to read from FITS into numpy and then into HDF5.
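For comparison, here is a minimal sketch of the kind of variant I have in mind (untested on the real data; the use of plain h5py with an explicit chunks argument is my own assumption): reading each batch with np.stack removes the manual j counter, and range(0, N, cache_size) makes the batching explicit and handles a short final batch.

import glob

import h5py
import numpy as np
from astropy.io import fits

fitslist = sorted(glob.glob("*fits"))
ext = 0  # assumed extension index
n_files = len(fitslist)
ny, nx = fits.getdata(fitslist[0], ext).shape
cache_size = 50  # batch size; the last batch may be smaller

with h5py.File("test.hdf5", "w") as f:
    dset = f.create_dataset("/x", (n_files, ny, nx), dtype=np.float32,
                            chunks=(1, ny, nx))
    for start in range(0, n_files, cache_size):
        batch = fitslist[start:start + cache_size]
        # np.stack builds the (len(batch), ny, nx) buffer in one call,
        # replacing the manual index bookkeeping.
        block = np.stack([fits.getdata(name, ext) for name in batch])
        dset[start:start + len(batch)] = block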