首页
学习
活动
专区
圈层
工具
发布
社区首页 >问答首页 >DirectSound接口说明

DirectSound接口说明
EN

Stack Overflow用户
提问于 2021-01-16 21:07:37
回答 1查看 412关注 0票数 0

作为一个大学项目,我们必须使用DirectSound应用程序接口开发一个服务器-客户端音乐流媒体应用程序。然而,由于缺乏在线信息、指南或教程,我可以收集有关它的信息的唯一来源是下面提供的一段代码(这是讲师提供的唯一内容)。有没有人能帮我理解这些函数的一般用途以及它们的实现顺序?

提前谢谢。

代码语言:javascript
复制
// DirectSound device interface for the default audio device (created in Initialise()).
IDirectSound8 *         directSound = nullptr;
// Primary buffer: only used to set the output format of the device.
IDirectSoundBuffer *    primaryBuffer = nullptr;
// Secondary buffer the wave data is copied into for playback (created in LoadWaveFile()).
IDirectSoundBuffer8 *   secondaryBuffer = nullptr;
// Raw audio bytes read from the wave file's 'data' chunk (allocated in LoadWaveFile()).
BYTE *                  dataBuffer = nullptr;
// Size of dataBuffer in bytes.
DWORD                   dataBufferSize;
// NOTE(review): declared but never assigned anywhere in the visible code --
// presumably intended to hold wfx.Format.nAvgBytesPerSec; confirm against the
// rest of the project before relying on it.
DWORD                   averageBytesPerSecond;

// Search the file for the chunk we want  
// Returns the size of the chunk and its location in the file

// Scan a RIFF file for the chunk identified by 'fourcc'.
// On success returns S_OK and reports the chunk's payload size in 'chunkSize'
// and its absolute file offset in 'chunkDataPosition'. Returns S_FALSE when
// the end of the RIFF data is reached without finding the chunk, or an
// HRESULT converted from the Win32 error code on I/O failure.
// The fourcc constants (fourccRIFF etc.) are assumed to be defined elsewhere
// in the project, e.g. via MAKEFOURCC.
HRESULT FindChunk(HANDLE fileHandle, FOURCC fourcc, DWORD & chunkSize, DWORD & chunkDataPosition)
{
    DWORD chunkType = 0;
    DWORD chunkDataSize = 0;
    DWORD riffDataSize = 0;
    DWORD fileType = 0;
    DWORD bytesRead = 0;
    DWORD offset = 0;

    // Always scan from the beginning of the file.
    if (SetFilePointer(fileHandle, 0, NULL, FILE_BEGIN) == INVALID_SET_FILE_POINTER)
    {
        return HRESULT_FROM_WIN32(GetLastError());
    }

    for (;;)
    {
        // Each chunk starts with an 8-byte header: FOURCC type + payload size.
        // BUG FIX: the original stored the error in 'hr' and kept going, acting
        // on stale chunk data and ultimately returning S_OK; fail immediately.
        if (ReadFile(fileHandle, &chunkType, sizeof(DWORD), &bytesRead, NULL) == 0)
        {
            return HRESULT_FROM_WIN32(GetLastError());
        }
        if (ReadFile(fileHandle, &chunkDataSize, sizeof(DWORD), &bytesRead, NULL) == 0)
        {
            return HRESULT_FROM_WIN32(GetLastError());
        }

        switch (chunkType)
        {
        case fourccRIFF:
            // The RIFF header's declared size covers the rest of the file; only
            // the 4-byte form type (e.g. 'WAVE') belongs to the header itself.
            riffDataSize = chunkDataSize;
            chunkDataSize = 4;
            if (ReadFile(fileHandle, &fileType, sizeof(DWORD), &bytesRead, NULL) == 0)
            {
                return HRESULT_FROM_WIN32(GetLastError());
            }
            break;

        default:
            // Not a chunk we recognise: skip over its payload.
            if (SetFilePointer(fileHandle, chunkDataSize, NULL, FILE_CURRENT) == INVALID_SET_FILE_POINTER)
            {
                return HRESULT_FROM_WIN32(GetLastError());
            }
        }

        offset += sizeof(DWORD) * 2;    // account for the 8-byte chunk header
        if (chunkType == fourcc)
        {
            chunkSize = chunkDataSize;
            chunkDataPosition = offset; // payload starts right after the header
            return S_OK;
        }
        offset += chunkDataSize;

        // BUG FIX: the original (like the Microsoft XAudio2 sample it derives
        // from) compared 'bytesRead' -- the byte count of the *last 4-byte
        // read*, never more than 4 -- against the total RIFF size, so this
        // exit condition could never trigger. Compare the running offset.
        // NOTE(review): RIFF chunks are word-aligned; odd-sized chunks are
        // followed by a pad byte that this scan (like the original) does not
        // skip -- confirm whether the course's wave files can contain them.
        if (offset >= riffDataSize)
        {
            return S_FALSE;
        }
    }
}

// Read a chunk of data of the specified size from the file at the specified
// location into the supplied buffer

// Read 'buffersize' bytes of chunk data into the caller-supplied 'buffer',
// starting at absolute file offset 'bufferoffset'.
// Returns S_OK on success, or an HRESULT built from the Win32 error code.
HRESULT ReadChunkData(HANDLE fileHandle, void * buffer, DWORD buffersize, DWORD bufferoffset)
{
    // Position the file pointer at the start of the chunk payload.
    if (SetFilePointer(fileHandle, bufferoffset, NULL, FILE_BEGIN) == INVALID_SET_FILE_POINTER)
    {
        return HRESULT_FROM_WIN32(GetLastError());
    }

    // Pull the whole chunk into the supplied buffer with a single read.
    DWORD transferred = 0;
    if (!ReadFile(fileHandle, buffer, buffersize, &transferred, NULL))
    {
        return HRESULT_FROM_WIN32(GetLastError());
    }
    return S_OK;
}

bool Initialise()
{
HRESULT result;
DSBUFFERDESC bufferDesc;
WAVEFORMATEX waveFormat;

// Initialize the direct sound interface pointer for the default sound device.
result = DirectSoundCreate8(NULL, &directSound, NULL);
if (FAILED(result))
{
    return false;
}

// Set the cooperative level to priority so the format of the primary sound buffer can be modified.
// We use the handle of the desktop window since we are a console application.  If you do write a 
// graphical application, you should use the HWnd of the graphical application. 
result = directSound->SetCooperativeLevel(GetDesktopWindow(), DSSCL_PRIORITY);
if (FAILED(result))
{
    return false;
}

// Setup the primary buffer description.
bufferDesc.dwSize = sizeof(DSBUFFERDESC);
bufferDesc.dwFlags = DSBCAPS_PRIMARYBUFFER | DSBCAPS_CTRLVOLUME;
bufferDesc.dwBufferBytes = 0;
bufferDesc.dwReserved = 0;
bufferDesc.lpwfxFormat = NULL;
bufferDesc.guid3DAlgorithm = GUID_NULL;

// Get control of the primary sound buffer on the default sound device.
result = directSound->CreateSoundBuffer(&bufferDesc, &primaryBuffer, NULL);
if (FAILED(result))
{
    return false;
}

// Setup the format of the primary sound bufffer.
// In this case it is a .WAV file recorded at 44,100 samples per second in 16-bit stereo (cd audio 
format).
// Really, we should set this up from the wave file format loaded from the file.
waveFormat.wFormatTag = WAVE_FORMAT_PCM;
waveFormat.nSamplesPerSec = 44100;
waveFormat.wBitsPerSample = 16;
waveFormat.nChannels = 2;
waveFormat.nBlockAlign = (waveFormat.wBitsPerSample / 8) * waveFormat.nChannels;
waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
waveFormat.cbSize = 0;

// Set the primary buffer to be the wave format specified.
result = primaryBuffer->SetFormat(&waveFormat);
if (FAILED(result))
{
    return false;
}
return true;
}

// Release everything acquired by Initialise()/LoadWaveFile(), in reverse
// order of creation. Safe to call even if initialisation never ran, and safe
// to call more than once (all pointers are reset to nullptr).
void Shutdown()
{
    // Free the audio data read from the wave file; delete[] on nullptr is a no-op.
    delete[] dataBuffer;
    dataBuffer = nullptr;

    // Drop our reference on the primary sound buffer, if we hold one.
    if (primaryBuffer)
    {
        primaryBuffer->Release();
        primaryBuffer = nullptr;
    }

    // Finally release the DirectSound device interface itself.
    if (directSound)
    {
        directSound->Release();
        directSound = nullptr;
    }
}

// Load the wave file into memory and setup the secondary buffer.

// Load the named wave file into 'dataBuffer' and create a DirectSound 8
// secondary buffer ('secondaryBuffer') big enough to hold all of it.
// Returns true on success; on failure, every resource acquired inside this
// function is released before returning.
// BUG FIXES vs the original: the file handle was leaked on every early-return
// after CreateFile; all FindChunk/ReadChunkData results were ignored (note
// FindChunk returns S_FALSE for "not found", so FAILED() alone is not enough);
// tempBuffer leaked when QueryInterface failed; a previously loaded dataBuffer
// leaked if the function was called twice.
bool LoadWaveFile(TCHAR * filename)
{
    WAVEFORMATEXTENSIBLE wfx = { 0 };
    WAVEFORMATEX waveFormat;
    DSBUFFERDESC bufferDesc;
    HRESULT result;
    IDirectSoundBuffer * tempBuffer = nullptr;

    DWORD chunkSize = 0;
    DWORD chunkPosition = 0;
    DWORD filetype = 0;

    // Open the wave file for reading.
    HANDLE fileHandle = CreateFile(filename, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, 0,
        NULL);
    if (fileHandle == INVALID_HANDLE_VALUE)
    {
        return false;
    }

    // Make sure we have a RIFF wave file ('RIFF' chunk whose form type is 'WAVE').
    if (FindChunk(fileHandle, fourccRIFF, chunkSize, chunkPosition) != S_OK ||
        FAILED(ReadChunkData(fileHandle, &filetype, sizeof(DWORD), chunkPosition)) ||
        filetype != fourccWAVE)
    {
        CloseHandle(fileHandle);
        return false;
    }

    // Locate the 'fmt ' chunk and copy its contents into a WAVEFORMATEXTENSIBLE.
    if (FindChunk(fileHandle, fourccFMT, chunkSize, chunkPosition) != S_OK ||
        FAILED(ReadChunkData(fileHandle, &wfx, chunkSize, chunkPosition)))
    {
        CloseHandle(fileHandle);
        return false;
    }

    // Find the audio data chunk: this is the data that needs to be copied
    // into the secondary buffer for playing.
    if (FindChunk(fileHandle, fourccDATA, chunkSize, chunkPosition) != S_OK)
    {
        CloseHandle(fileHandle);
        return false;
    }
    dataBufferSize = chunkSize;

    // Read the audio data from the 'data' chunk, replacing any previously
    // loaded file instead of leaking it.
    delete[] dataBuffer;
    dataBuffer = new BYTE[dataBufferSize];
    if (FAILED(ReadChunkData(fileHandle, dataBuffer, dataBufferSize, chunkPosition)))
    {
        delete[] dataBuffer;
        dataBuffer = nullptr;
        CloseHandle(fileHandle);
        return false;
    }
    CloseHandle(fileHandle);

    // Set the wave format of the secondary buffer that this wave file will be
    // loaded onto. The value of wfx.Format.nAvgBytesPerSec is very useful: it
    // gives an approximate byte count for one second of audio data.
    waveFormat.wFormatTag = wfx.Format.wFormatTag;
    waveFormat.nSamplesPerSec = wfx.Format.nSamplesPerSec;
    waveFormat.wBitsPerSample = wfx.Format.wBitsPerSample;
    waveFormat.nChannels = wfx.Format.nChannels;
    waveFormat.nBlockAlign = wfx.Format.nBlockAlign;
    waveFormat.nAvgBytesPerSec = wfx.Format.nAvgBytesPerSec;
    waveFormat.cbSize = 0;

    // Set the buffer description of the secondary sound buffer. In this
    // example the buffer is the same size as the audio data; for the
    // assignment it should only hold approximately four seconds of data.
    bufferDesc.dwSize = sizeof(DSBUFFERDESC);
    bufferDesc.dwFlags = DSBCAPS_CTRLVOLUME | DSBCAPS_GLOBALFOCUS | DSBCAPS_CTRLPOSITIONNOTIFY;
    bufferDesc.dwBufferBytes = dataBufferSize;
    bufferDesc.dwReserved = 0;
    bufferDesc.lpwfxFormat = &waveFormat;
    bufferDesc.guid3DAlgorithm = GUID_NULL;

    // Create a temporary sound buffer with the specific buffer settings.
    result = directSound->CreateSoundBuffer(&bufferDesc, &tempBuffer, NULL);
    if (FAILED(result))
    {
        return false;
    }

    // Test the buffer format against the DirectSound 8 interface and create
    // the secondary buffer. The temporary buffer is released whether or not
    // QueryInterface succeeds.
    result = tempBuffer->QueryInterface(IID_IDirectSoundBuffer8, (void**)&secondaryBuffer);
    tempBuffer->Release();
    tempBuffer = nullptr;
    if (FAILED(result))
    {
        return false;
    }

    return true;
}

// Drop our reference on the secondary sound buffer, if one exists.
// Safe to call repeatedly: the pointer is reset to nullptr after release.
void ReleaseSecondaryBuffer()
{
    if (secondaryBuffer)
    {
        secondaryBuffer->Release();
        secondaryBuffer = nullptr;
    }
}

// Copy the loaded wave data into the secondary buffer, play it once, and
// block until DirectSound signals that playback has stopped.
// Requires LoadWaveFile() to have succeeded first. Returns true on success.
// BUG FIXES vs the original: the CreateEvent result was never checked; the
// event handle leaked on every failure path; SetNotificationPositions'
// result was ignored; and the second Lock region was filled with a second
// copy of the *start* of dataBuffer instead of the data following region 1.
bool PlayWaveFile()
{
    HRESULT result;
    unsigned char * bufferPtr1 = nullptr;
    unsigned long   bufferSize1 = 0;
    unsigned char * bufferPtr2 = nullptr;
    unsigned long   bufferSize2 = 0;
    LPDIRECTSOUNDNOTIFY8 directSoundNotify = nullptr;
    DSBPOSITIONNOTIFY positionNotify[2];

    // Set position of playback at the beginning of the sound buffer.
    result = secondaryBuffer->SetCurrentPosition(0);
    if (FAILED(result))
    {
        return false;
    }

    // Set volume of the buffer to 100%.
    result = secondaryBuffer->SetVolume(DSBVOLUME_MAX);
    if (FAILED(result))
    {
        return false;
    }

    // Create an event for notification that playing has stopped. This is only
    // useful when the audio fits entirely in the secondary buffer (as here).
    // For the assignment you will need notifications at the first and third
    // quarter of the buffer so you know when to copy in more data.
    HANDLE playEventHandles[1];
    playEventHandles[0] = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (playEventHandles[0] == NULL)
    {
        return false;
    }

    result = secondaryBuffer->QueryInterface(IID_IDirectSoundNotify8, (LPVOID*)&directSoundNotify);
    if (FAILED(result))
    {
        CloseHandle(playEventHandles[0]);
        return false;
    }

    // DSBPN_OFFSETSTOP signals that the buffer has finished playing. For the
    // assignment, register two position notifications instead (see above).
    positionNotify[0].dwOffset = DSBPN_OFFSETSTOP;
    positionNotify[0].hEventNotify = playEventHandles[0];
    result = directSoundNotify->SetNotificationPositions(1, positionNotify);
    directSoundNotify->Release();
    directSoundNotify = nullptr;
    if (FAILED(result))
    {
        CloseHandle(playEventHandles[0]);
        return false;
    }

    // Lock the secondary buffer to write wave data into it. Here we lock the
    // whole buffer; for the assignment, only lock the half that is not being
    // played. See the IDirectSoundBuffer8 documentation for the parameters.
    result = secondaryBuffer->Lock(0, dataBufferSize, (void**)&bufferPtr1, (DWORD*)&bufferSize1, (void**)&bufferPtr2, (DWORD*)&bufferSize2, 0);
    if (FAILED(result))
    {
        CloseHandle(playEventHandles[0]);
        return false;
    }

    // Copy the wave data into the locked region(s). If Lock wrapped around,
    // the second region must receive the data that FOLLOWS region 1 -- not
    // another copy of the start of the buffer. Insert 0s for silence.
    memcpy(bufferPtr1, dataBuffer, bufferSize1);
    if (bufferPtr2 != NULL)
    {
        memcpy(bufferPtr2, dataBuffer + bufferSize1, bufferSize2);
    }

    // Unlock the secondary buffer after the data has been written to it.
    result = secondaryBuffer->Unlock((void*)bufferPtr1, bufferSize1, (void*)bufferPtr2, bufferSize2);
    if (FAILED(result))
    {
        CloseHandle(playEventHandles[0]);
        return false;
    }

    // Play the contents of the secondary sound buffer once. Pass
    // DSBPLAY_LOOPING as the last argument to restart from the beginning;
    // if play is already in progress, it just continues.
    result = secondaryBuffer->Play(0, 0, 0);
    if (FAILED(result))
    {
        CloseHandle(playEventHandles[0]);
        return false;
    }

    // Wait for the stop notification. With one event WaitForSingleObject would
    // do, but the assignment needs several events, hence WaitForMultipleObjects.
    WaitForMultipleObjects(1, playEventHandles, FALSE, INFINITE);

    CloseHandle(playEventHandles[0]);
    return true;
}
EN

回答 1

Stack Overflow用户

发布于 2021-03-17 03:18:53

DirectSound已弃用。有关建议的替换,请参阅下面的内容。

文档可以在Microsoft Docs上找到。上一次发布DirectSound的示例是在遗留的DirectX SDK (2007年11月)版本中,这就是为什么您很难找到它们的原因。你可以在GitHub上找到它们。DirectSound的头文件和链接库位于Windows SDK中。

建议

对于游戏中经常使用的“实时混合和特效”,现代的替代品是XAudio2。XAudio 2.9包含在Windows 10中,并可通过简单的并排(side-by-side)重新分发模型获得,适用于Windows 7、Windows 8.0和Windows 8.1。文档、示例和redist均可在微软官方渠道找到。您可能还想看看DirectX Tool Kit for Audio。

有关其他音频输出和输入,请参阅Windows Vista、Windows7、Windows8.0、Windows8.1和Windows10上支持的Windows Core audio API (WASAPI)。文档可以在here中找到。一些示例可以在GitHub上的Xbox-ATG-SamplesWindows-universal-samples中找到--虽然这些都是UWP示例,但API也支持Win32桌面。

在Windows 10上还有一个新的Microsoft Spatial Sounds(也称为 Windows Sonic)。文档可以在微软官方文档中找到,示例可以在GitHub上的Xbox-ATG-Samples中找到。

票数 0
EN
页面原文内容由Stack Overflow提供。腾讯云小微IT领域专用引擎提供翻译支持
原文链接:

https://stackoverflow.com/questions/65749997

复制
相关文章

相似问题

领券
问题归档专栏文章快讯文章归档关键词归档开发者手册归档开发者手册 Section 归档