// Copyright Oliver Kowalke 2013.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

#include <chrono>
#include <cstdlib>
#include <iostream>
#include <memory>
#include <random>
#include <tuple>

#include <cuda.h>

#include <boost/assert.hpp>
#include <boost/bind.hpp>
#include <boost/intrusive_ptr.hpp>

#include <boost/fiber/all.hpp>
#include <boost/fiber/cuda/waitfor.hpp>

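// element-wise vector addition kernel: each CUDA thread computes its global
// index and, if that index lies inside the chunk, adds one pair of elements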
__global__
void vector_add( int * a, int * b, int * c, int size) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if ( idx < size) {
        c[idx] = a[idx] + b[idx];
    }
}

int main() {
    try {
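        // a plain bool is sufficient here: both fibers run cooperatively on
        // the single main thread, so no atomic or mutex is required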
        bool done = false;
        boost::fibers::fiber f1( [&done]{
            std::cout << "f1: entered" << std::endl;
            try {
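                // two CUDA streams so host-to-device copies, kernel launches
                // and device-to-host copies of alternating chunks can overlap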
                cudaStream_t stream0, stream1;
                cudaStreamCreate( & stream0);
                cudaStreamCreate( & stream1);
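                // note: the return codes of the CUDA API calls below are left
                // unchecked to keep the example short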
                int size = 1024 * 1024;
                int full_size = 20 * size;
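                // page-locked (pinned) host buffers: cudaMemcpyAsync overlaps
                // with kernel execution only when the host memory is pinned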
                int * host_a, * host_b, * host_c;
                cudaHostAlloc( & host_a, full_size * sizeof( int), cudaHostAllocDefault);
                cudaHostAlloc( & host_b, full_size * sizeof( int), cudaHostAllocDefault);
                cudaHostAlloc( & host_c, full_size * sizeof( int), cudaHostAllocDefault);
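                // one set of device buffers per stream, each sized for a
                // single chunk, so the two pipelines never share memory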
                int * dev_a0, * dev_b0, * dev_c0;
                int * dev_a1, * dev_b1, * dev_c1;
                cudaMalloc( & dev_a0, size * sizeof( int) );
                cudaMalloc( & dev_b0, size * sizeof( int) );
                cudaMalloc( & dev_c0, size * sizeof( int) );
                cudaMalloc( & dev_a1, size * sizeof( int) );
                cudaMalloc( & dev_b1, size * sizeof( int) );
                cudaMalloc( & dev_c1, size * sizeof( int) );
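                // fill both input arrays with small random integers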
                std::minstd_rand generator;
                std::uniform_int_distribution<> distribution(1, 6);
                for ( int i = 0; i < full_size; ++i) {
                    host_a[i] = distribution( generator);
                    host_b[i] = distribution( generator);
                }
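                // process the arrays in chunks of 2 * size elements: stream0
                // handles the first half of each chunk, stream1 the second;
                // every copy and kernel launch below is asynchronous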
                for ( int i = 0; i < full_size; i += 2 * size) {
                    cudaMemcpyAsync( dev_a0, host_a + i, size * sizeof( int), cudaMemcpyHostToDevice, stream0);
                    cudaMemcpyAsync( dev_a1, host_a + i + size, size * sizeof( int), cudaMemcpyHostToDevice, stream1);
                    cudaMemcpyAsync( dev_b0, host_b + i, size * sizeof( int), cudaMemcpyHostToDevice, stream0);
                    cudaMemcpyAsync( dev_b1, host_b + i + size, size * sizeof( int), cudaMemcpyHostToDevice, stream1);
                    vector_add<<< size / 256, 256, 0, stream0 >>>( dev_a0, dev_b0, dev_c0, size);
                    vector_add<<< size / 256, 256, 0, stream1 >>>( dev_a1, dev_b1, dev_c1, size);
                    cudaMemcpyAsync( host_c + i, dev_c0, size * sizeof( int), cudaMemcpyDeviceToHost, stream0);
                    cudaMemcpyAsync( host_c + i + size, dev_c1, size * sizeof( int), cudaMemcpyDeviceToHost, stream1);
                }
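                // waitfor_all() suspends only this fiber, not the thread,
                // until both streams have drained; fiber f2 keeps running in
                // the meantime. it returns one ( stream, cudaError_t) tuple
                // per stream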
                auto results = boost::fibers::cuda::waitfor_all( stream0, stream1);
                for ( auto & result : results) {
                    BOOST_ASSERT( stream0 == std::get< 0 >( result) || stream1 == std::get< 0 >( result) );
                    BOOST_ASSERT( cudaSuccess == std::get< 1 >( result) );
                }
                std::cout << "f1: GPU computation finished" << std::endl;
                cudaFreeHost( host_a);
                cudaFreeHost( host_b);
                cudaFreeHost( host_c);
                cudaFree( dev_a0);
                cudaFree( dev_b0);
                cudaFree( dev_c0);
                cudaFree( dev_a1);
                cudaFree( dev_b1);
                cudaFree( dev_c1);
                cudaStreamDestroy( stream0);
                cudaStreamDestroy( stream1);
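                // signal fiber f2 that the GPU work is complete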
                done = true;
            } catch ( std::exception const& ex) {
                std::cerr << "exception: " << ex.what() << std::endl;
            }
            std::cout << "f1: leaving" << std::endl;
        });
        boost::fibers::fiber f2( [&done]{
            std::cout << "f2: entered" << std::endl;
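            // keeps printing while f1 is suspended in waitfor_all(), showing
            // that the blocked fiber does not block the underlying thread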
            while ( ! done) {
                std::cout << "f2: sleeping" << std::endl;
                boost::this_fiber::sleep_for( std::chrono::milliseconds( 1) );
            }
            std::cout << "f2: leaving" << std::endl;
        });
        f1.join();
        f2.join();
        std::cout << "done." << std::endl;
        return EXIT_SUCCESS;
    } catch ( std::exception const& e) {
        std::cerr << "exception: " << e.what() << std::endl;
    } catch (...) {
        std::cerr << "unhandled exception" << std::endl;
    }
    return EXIT_FAILURE;
}