• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "cc/resources/raster_worker_pool.h"
6 
7 #include <algorithm>
8 
9 #include "base/debug/trace_event.h"
10 #include "base/lazy_instance.h"
11 #include "base/strings/stringprintf.h"
12 #include "base/threading/simple_thread.h"
13 #include "cc/base/scoped_ptr_deque.h"
14 
15 namespace cc {
16 namespace {
17 
18 class RasterTaskGraphRunner : public TaskGraphRunner,
19                               public base::DelegateSimpleThread::Delegate {
20  public:
RasterTaskGraphRunner()21   RasterTaskGraphRunner() {
22     size_t num_threads = RasterWorkerPool::GetNumRasterThreads();
23     while (workers_.size() < num_threads) {
24       scoped_ptr<base::DelegateSimpleThread> worker =
25           make_scoped_ptr(new base::DelegateSimpleThread(
26               this,
27               base::StringPrintf("CompositorRasterWorker%u",
28                                  static_cast<unsigned>(workers_.size() + 1))
29                   .c_str()));
30       worker->Start();
31 #if defined(OS_ANDROID) || defined(OS_LINUX)
32       worker->SetThreadPriority(base::kThreadPriority_Background);
33 #endif
34       workers_.push_back(worker.Pass());
35     }
36   }
37 
~RasterTaskGraphRunner()38   virtual ~RasterTaskGraphRunner() { NOTREACHED(); }
39 
40  private:
41   // Overridden from base::DelegateSimpleThread::Delegate:
Run()42   virtual void Run() OVERRIDE {
43     TaskGraphRunner::Run();
44   }
45 
46   ScopedPtrDeque<base::DelegateSimpleThread> workers_;
47 };
48 
// Leaky on purpose: the runner's worker threads live for the whole process,
// and its destructor is NOTREACHED() — it must never be torn down.
base::LazyInstance<RasterTaskGraphRunner>::Leaky g_task_graph_runner =
    LAZY_INSTANCE_INITIALIZER;

// Thread count used when SetNumRasterThreads() was never called.
const int kDefaultNumRasterThreads = 1;

// 0 means "unset"; lazily replaced by the default in GetNumRasterThreads().
int g_num_raster_threads = 0;
55 
56 class RasterFinishedTaskImpl : public RasterizerTask {
57  public:
RasterFinishedTaskImpl(base::SequencedTaskRunner * task_runner,const base::Closure & on_raster_finished_callback)58   explicit RasterFinishedTaskImpl(
59       base::SequencedTaskRunner* task_runner,
60       const base::Closure& on_raster_finished_callback)
61       : task_runner_(task_runner),
62         on_raster_finished_callback_(on_raster_finished_callback) {}
63 
64   // Overridden from Task:
RunOnWorkerThread()65   virtual void RunOnWorkerThread() OVERRIDE {
66     TRACE_EVENT0("cc", "RasterFinishedTaskImpl::RunOnWorkerThread");
67     RasterFinished();
68   }
69 
70   // Overridden from RasterizerTask:
ScheduleOnOriginThread(RasterizerTaskClient * client)71   virtual void ScheduleOnOriginThread(RasterizerTaskClient* client) OVERRIDE {}
CompleteOnOriginThread(RasterizerTaskClient * client)72   virtual void CompleteOnOriginThread(RasterizerTaskClient* client) OVERRIDE {}
RunReplyOnOriginThread()73   virtual void RunReplyOnOriginThread() OVERRIDE {}
74 
75  protected:
~RasterFinishedTaskImpl()76   virtual ~RasterFinishedTaskImpl() {}
77 
RasterFinished()78   void RasterFinished() {
79     task_runner_->PostTask(FROM_HERE, on_raster_finished_callback_);
80   }
81 
82  private:
83   scoped_refptr<base::SequencedTaskRunner> task_runner_;
84   const base::Closure on_raster_finished_callback_;
85 
86   DISALLOW_COPY_AND_ASSIGN(RasterFinishedTaskImpl);
87 };
88 
89 }  // namespace
90 
// Lower value == higher priority (benchmark tasks at 0 run first).
// This allows a micro benchmark system to run tasks with highest priority,
// since it should finish as quickly as possible.
unsigned RasterWorkerPool::kBenchmarkRasterTaskPriority = 0u;
// Task priorities that make sure raster finished tasks run before any
// remaining raster tasks.
unsigned RasterWorkerPool::kRasterFinishedTaskPriority = 1u;
unsigned RasterWorkerPool::kRasterTaskPriorityBase = 2u;
98 
// Intentionally trivial; this base class holds no state of its own here.
RasterWorkerPool::RasterWorkerPool() {}

RasterWorkerPool::~RasterWorkerPool() {}
102 
// static
// Sets the process-wide raster thread count. Must be called at most once,
// and before the first call to GetNumRasterThreads() (which latches the
// value by filling in the default).
void RasterWorkerPool::SetNumRasterThreads(int num_threads) {
  DCHECK_LT(0, num_threads);       // At least one raster thread is required.
  DCHECK_EQ(0, g_num_raster_threads);  // 0 == still unset; set-once contract.

  g_num_raster_threads = num_threads;
}
110 
111 // static
GetNumRasterThreads()112 int RasterWorkerPool::GetNumRasterThreads() {
113   if (!g_num_raster_threads)
114     g_num_raster_threads = kDefaultNumRasterThreads;
115 
116   return g_num_raster_threads;
117 }
118 
// static
// Returns the shared runner, lazily constructing it (and starting its
// worker threads) on first access.
TaskGraphRunner* RasterWorkerPool::GetTaskGraphRunner() {
  return g_task_graph_runner.Pointer();
}
123 
// static
// Creates the sentinel task that posts |on_raster_finished_callback| to
// |task_runner| once it runs on a worker thread.
scoped_refptr<RasterizerTask> RasterWorkerPool::CreateRasterFinishedTask(
    base::SequencedTaskRunner* task_runner,
    const base::Closure& on_raster_finished_callback) {
  return make_scoped_refptr(
      new RasterFinishedTaskImpl(task_runner, on_raster_finished_callback));
}
131 
132 // static
ScheduleTasksOnOriginThread(RasterizerTaskClient * client,TaskGraph * graph)133 void RasterWorkerPool::ScheduleTasksOnOriginThread(RasterizerTaskClient* client,
134                                                    TaskGraph* graph) {
135   TRACE_EVENT0("cc", "Rasterizer::ScheduleTasksOnOriginThread");
136 
137   for (TaskGraph::Node::Vector::iterator it = graph->nodes.begin();
138        it != graph->nodes.end();
139        ++it) {
140     TaskGraph::Node& node = *it;
141     RasterizerTask* task = static_cast<RasterizerTask*>(node.task);
142 
143     if (!task->HasBeenScheduled()) {
144       task->WillSchedule();
145       task->ScheduleOnOriginThread(client);
146       task->DidSchedule();
147     }
148   }
149 }
150 
// static
// Appends a node for |task| to |graph| with the given |priority| and
// unresolved |dependencies| count. |task| must not already be in the graph.
void RasterWorkerPool::InsertNodeForTask(TaskGraph* graph,
                                         RasterizerTask* task,
                                         unsigned priority,
                                         size_t dependencies) {
  // Duplicate check runs only in debug builds (the whole find_if lives
  // inside the DCHECK), so keep it there.
  DCHECK(std::find_if(graph->nodes.begin(),
                      graph->nodes.end(),
                      TaskGraph::Node::TaskComparator(task)) ==
         graph->nodes.end());
  graph->nodes.push_back(TaskGraph::Node(task, priority, dependencies));
}
162 
163 // static
InsertNodesForRasterTask(TaskGraph * graph,RasterTask * raster_task,const ImageDecodeTask::Vector & decode_tasks,unsigned priority)164 void RasterWorkerPool::InsertNodesForRasterTask(
165     TaskGraph* graph,
166     RasterTask* raster_task,
167     const ImageDecodeTask::Vector& decode_tasks,
168     unsigned priority) {
169   size_t dependencies = 0u;
170 
171   // Insert image decode tasks.
172   for (ImageDecodeTask::Vector::const_iterator it = decode_tasks.begin();
173        it != decode_tasks.end();
174        ++it) {
175     ImageDecodeTask* decode_task = it->get();
176 
177     // Skip if already decoded.
178     if (decode_task->HasCompleted())
179       continue;
180 
181     dependencies++;
182 
183     // Add decode task if it doesn't already exists in graph.
184     TaskGraph::Node::Vector::iterator decode_it =
185         std::find_if(graph->nodes.begin(),
186                      graph->nodes.end(),
187                      TaskGraph::Node::TaskComparator(decode_task));
188     if (decode_it == graph->nodes.end())
189       InsertNodeForTask(graph, decode_task, priority, 0u);
190 
191     graph->edges.push_back(TaskGraph::Edge(decode_task, raster_task));
192   }
193 
194   InsertNodeForTask(graph, raster_task, priority, dependencies);
195 }
196 
// static
// Points |bitmap| at pixel storage appropriate for |buffer_format|.
// |stride| of 0 means rows are tightly packed. Pair with
// ReleaseBitmapForBuffer(), which writes back / resets the bitmap.
void RasterWorkerPool::AcquireBitmapForBuffer(SkBitmap* bitmap,
                                              uint8_t* buffer,
                                              ResourceFormat buffer_format,
                                              const gfx::Size& size,
                                              int stride) {
  switch (buffer_format) {
    case RGBA_4444:
      // Raster into a temporary N32 bitmap; ReleaseBitmapForBuffer()
      // converts into |buffer| via readPixels when the color types differ.
      // NOTE(review): presumably because rastering directly to 4444 is not
      // supported here — confirm against the rasterizer's Skia usage.
      bitmap->allocN32Pixels(size.width(), size.height());
      break;
    case RGBA_8888:
    case BGRA_8888: {
      // These formats can wrap |buffer| directly — no copy on release.
      SkImageInfo info =
          SkImageInfo::MakeN32Premul(size.width(), size.height());
      if (!stride)
        stride = info.minRowBytes();
      bitmap->installPixels(info, buffer, stride);
      break;
    }
    case ALPHA_8:
    case LUMINANCE_8:
    case RGB_565:
    case ETC1:
      // Not valid raster buffer formats for this path.
      NOTREACHED();
      break;
  }
}
224 
225 // static
ReleaseBitmapForBuffer(SkBitmap * bitmap,uint8_t * buffer,ResourceFormat buffer_format)226 void RasterWorkerPool::ReleaseBitmapForBuffer(SkBitmap* bitmap,
227                                               uint8_t* buffer,
228                                               ResourceFormat buffer_format) {
229   SkColorType buffer_color_type = ResourceFormatToSkColorType(buffer_format);
230   if (buffer_color_type != bitmap->colorType()) {
231     SkImageInfo dst_info = bitmap->info();
232     dst_info.fColorType = buffer_color_type;
233     // TODO(kaanb): The GL pipeline assumes a 4-byte alignment for the
234     // bitmap data. There will be no need to call SkAlign4 once crbug.com/293728
235     // is fixed.
236     const size_t dst_row_bytes = SkAlign4(dst_info.minRowBytes());
237     DCHECK_EQ(0u, dst_row_bytes % 4);
238     bool success = bitmap->readPixels(dst_info, buffer, dst_row_bytes, 0, 0);
239     DCHECK_EQ(true, success);
240   }
241   bitmap->reset();
242 }
243 
244 }  // namespace cc
245