Another solution that might better fit your needs is to use the http api instead https://www.dropbox.com/developers/documentation/http/overview.
Specifically looking at the "files/download" endpoint. https://www.dropbox.com/developers/documentation/http/documentation#files-download
With the Python Dropbox SDK you download each file synchronously. In order to get some more efficiency you can use some of the asynchronous functionality in Python 3.7 (and some additional libraries) to try and download your files together.
Here is some example code that will save a list of files to your "/tmp" directory.
import asyncio
import json
import os

import aiofiles
import aiohttp
from tqdm import tqdm_notebook as tqdm
# Dropbox "files/download" content endpoint; the target file is selected
# via the Dropbox-API-Arg header, not the URL.
DROPBOX_URL='https://content.dropboxapi.com/2/files/download'
async def dropbox_async_download(session, path, prefix_path="/tmp"):
    """Download one Dropbox file asynchronously and save it locally.

    Parameters
    ----------
    session : aiohttp.ClientSession
        Shared HTTP session to issue the request on.
    path : str
        Dropbox path of the file (e.g. "/a.txt"); also used as the
        local filename suffix.
    prefix_path : str
        Local directory prefix the file is written under.

    Raises
    ------
    KeyError
        If the DROPBOX_ACCESS_TOKEN environment variable is not set.
    """
    # Headers per the files/download HTTP endpoint:
    # https://www.dropbox.com/developers/documentation/http/documentation#files-download
    headers = {
        "Authorization": "Bearer %s" % os.environ["DROPBOX_ACCESS_TOKEN"],
        "Dropbox-API-Arg": json.dumps({"path": path}),
    }
    with tqdm(total=1000) as pbar:
        # Stream the response body straight to disk in binary mode.
        # (Decoding chunks as UTF-8 and writing in text mode would
        # corrupt or crash on any non-text file.)
        async with session.post(DROPBOX_URL, headers=headers) as response, \
                aiofiles.open(prefix_path + path, "wb") as f:
            async for chunk, _ in response.content.iter_chunks():
                await f.write(chunk)
                pbar.set_postfix(file=path)
                pbar.update(1)
    print("Done!", path)
async def dropbox_download_files(files):
    """Download several Dropbox files concurrently.

    Parameters
    ----------
    files : list[str]
        Dropbox paths to download.

    Returns
    -------
    list
        The per-file results from the individual downloads, in the same
        order as `files`.
    """
    # One session shared by every download; one coroutine per file,
    # all run concurrently by gather.
    async with aiohttp.ClientSession() as session:
        coroutines = [dropbox_async_download(session, f) for f in files]
        return await asyncio.gather(*coroutines)
files = ["a.txt", "b.txt", "c.txt"]
# asyncio.run (Python 3.7+) creates, runs, and closes the event loop in
# one call. The original `loop.run_until_complete(...)` line was also
# missing its closing parenthesis, which made the script a SyntaxError.
asyncio.run(dropbox_download_files(files))
Hope this helps!